# -*- coding: utf-8 -*-
import csv
import datetime
from django.conf import settings
from django.contrib import admin
from django.http import HttpResponse
from django.utils.encoding import smart_str
from djforms.scholars.models import *
def export_scholars(modeladmin, request, queryset):
"""Export the presentation data."""
response = HttpResponse('', content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=cos.csv'
writer = csv.writer(response)
writer.writerow([
'Title',
'Reviewer',
'Leader',
'Leader Email',
'Sponsor',
'Other Sponsor',
'Presenters',
'Funding Source',
'Work Type',
'Permission to Reproduce',
'Faculty Sponsor Approval',
'Table',
'Electricity',
'Link',
'Poster',
'Date created',
])
for presentation in queryset:
link = 'http://{0}{1}'.format(
settings.SERVER_URL,
presentation.get_absolute_url(),
)
poster = 'http://{0}/assets/{1}'.format(
settings.SERVER_URL, presentation.poster_file,
)
try:
leader = '{0}, {1}'.format(
presentation.leader.last_name,
presentation.leader.first_name,
)
except Exception:
leader = ''
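        # Build a '|'-delimited string of non-leader presenters; the
        # trailing '|' is stripped below via presenters[:-1].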
presenters = ''
for presenter in presentation.presenters.all():
if not presenter.leader:
presenters += '{0}, {1}|'.format(
presenter.last_name, presenter.first_name,
)
title = smart_str(
presentation.title,
encoding='utf-8',
strings_only=False,
errors='strict',
)
funding = smart_str(
presentation.funding,
encoding='utf-8',
strings_only=False,
errors='strict',
)
work_type = smart_str(
presentation.work_type,
encoding='utf-8',
strings_only=False,
errors='strict',
)
sponsor_email = ''
if presentation.leader:
sponsor_email = presentation.leader.sponsor_email
sponsor_other = presentation.leader.sponsor_other
writer.writerow([
title,
presentation.reviewer,
leader,
presentation.user.email,
sponsor_email,
sponsor_other,
presenters[:-1],
funding,
work_type,
presentation.permission,
presentation.shared,
presentation.need_table,
presentation.need_electricity,
            link,
            poster,
presentation.date_created,
])
return response
export_scholars.short_description = """
Export the selected Celebration of Scholars Submissions
"""
class PresentationAdmin(admin.ModelAdmin):
"""Admin class for the presentation data model."""
model = Presentation
actions = [export_scholars]
raw_id_fields = ('user', 'updated_by', 'leader')
list_max_show_all = 500
list_per_page = 500
list_display = (
'title',
'reviewer',
'last_name',
'first_name',
'email',
'sponsor',
'sponsor_other',
'get_presenters',
'funding',
'work_type',
'permission',
'shared',
'need_table',
'need_electricity',
'status',
'poster',
'date_created',
)
ordering = [
'-date_created',
'title',
'work_type',
'permission',
'shared',
'need_table',
'need_electricity',
'status',
]
search_fields = (
'title',
'user__last_name',
'user__email',
'funding',
)
list_filter = ('status', 'date_created')
list_editable = ['reviewer']
def queryset(self, request):
"""Only show presentations that were created after a certain date."""
TODAY = datetime.date.today()
YEAR = int(TODAY.year)
qs = super(PresentationAdmin, self).queryset(request)
start_date = datetime.date(YEAR, 1, 1)
return qs.filter(date_created__gte=start_date)
def save_model(self, request, obj, form, change):
"""Override the save method to update some things."""
if change:
obj.updated_by = request.user
obj.save()
class PresenterAdmin(admin.ModelAdmin):
"""Admin class for the presenter model."""
model = Presenter
list_max_show_all = 500
list_per_page = 500
list_display = (
'date_created',
'last_name',
'first_name',
'email',
'leader',
'prez_type',
'college_year',
'major',
'hometown',
'sponsor',
'sponsor_name',
'sponsor_email',
'sponsor_other',
'department',
)
ordering = [
'date_created',
'last_name',
'first_name',
'email',
'leader',
'prez_type',
'college_year',
'major',
'hometown',
'sponsor',
'sponsor_name',
'sponsor_email',
'sponsor_other',
'department',
]
search_fields = (
'last_name',
'first_name',
'email',
)
admin.site.register(Presenter, PresenterAdmin)
admin.site.register(Presentation, PresentationAdmin)
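# Note: the queryset() override above is the pre-Django-1.6 hook; Django 1.6
# renamed it to get_queryset(). A minimal sketch of the equivalent override on
# newer Django (same model and filter, only the method name changes):
#
#     def get_queryset(self, request):
#         qs = super(PresentationAdmin, self).get_queryset(request)
#         start_date = datetime.date(datetime.date.today().year, 1, 1)
#         return qs.filter(date_created__gte=start_date)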
# Importing the Image class from the PIL module
from PIL import Image
# Opens an image in RGB mode
im = Image.open("data/frame1.jpg")
# Setting the points for cropped image
left = 155
top = 65
right = 360
bottom = 270
# Cropped image of the above dimensions
# (it will not change the original image)
im1 = im.crop((left, top, right, bottom))
# Shows the image in image viewer
im1.show()
im.show()
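# The crop box is (left, top, right, bottom) in pixels, so the region kept
# here is 205 x 205. To persist the crop to disk (a minimal sketch; the
# output path is an assumption, not part of the original script):
im1.save("data/frame1_cropped.jpg")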
import requests
save_result = requests.post('http://localhost:5000/save', json={'value': 'witam'})
print(save_result.text)
read_result = requests.get('http://localhost:5000/read')
print(read_result.text)
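# For context, a minimal server this client could talk to (an illustrative
# sketch only; the original snippet does not include the server). It assumes
# /save stores the posted JSON value in memory and /read returns it:
from flask import Flask, request

app = Flask(__name__)
store = {}

@app.route('/save', methods=['POST'])
def save():
    # Remember the last JSON value posted, e.g. {'value': 'witam'}
    store['value'] = request.get_json().get('value')
    return 'saved'

@app.route('/read')
def read():
    # Return the stored value as plain text
    return str(store.get('value', ''))

if __name__ == '__main__':
    app.run(port=5000)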
from handler.auth import provider_required
from handler.provider import ProviderBaseHandler
from forms.provider import ProviderAddressForm, ProviderVanityURLForm
import logging
from data import db
from util import saved_message
class ProviderEditAddressHandler(ProviderBaseHandler):
@provider_required
def get(self, vanity_url=None):
provider = db.get_provider_from_vanity_url(vanity_url)
logging.info("provider dump before edit:" + str(vars(provider)))
address_form = ProviderAddressForm().get_form(obj=provider)
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=address_form, vanity_url_form=vanity_url_form)
@provider_required
def post(self, vanity_url=None):
form = ProviderAddressForm().get_form(self.request.POST)
if form.validate():
# Store Provider
provider = db.get_provider_from_vanity_url(vanity_url)
form.populate_obj(provider)
provider.put()
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form, success_message=saved_message)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Success")
else:
# show validation error
provider = db.get_provider_from_vanity_url(vanity_url)
vanity_url_form = ProviderVanityURLForm().get_form(obj=provider)
self.render_address(provider, address_form=form, vanity_url_form=vanity_url_form)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Validation Error")
class ProviderChangeURLHandler(ProviderBaseHandler):
@provider_required
def post(self, vanity_url=None):
form = ProviderVanityURLForm().get_form(self.request.POST)
if form.validate():
# Store Provider
provider = db.get_provider_from_vanity_url(vanity_url)
form.populate_obj(provider)
provider.put()
self.redirect('/provider/address/' + provider.vanity_url)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Success")
else:
# show validation error
provider = db.get_provider_from_vanity_url(vanity_url)
address_form = ProviderAddressForm().get_form(obj=provider)
self.render_address(provider, address_form=address_form, vanity_url_form=form)
# log the event
self.log_event(user=provider.user, msg="Edit Address: Validation Error")
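# How these handlers might be mounted (an illustrative sketch only; the module
# matches the webapp2 handler API, but these routes are an assumption and are
# not part of the original file):
#
# import webapp2
#
# app = webapp2.WSGIApplication([
#     webapp2.Route('/provider/address/<vanity_url>', ProviderEditAddressHandler),
#     webapp2.Route('/provider/address/url/<vanity_url>', ProviderChangeURLHandler),
# ])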
# MEDIUM
# Naive repeated subtraction of the divisor TLEs, so use bit manipulation:
# double the divisor with left shifts and subtract whole chunks at once.
# input: 100 / 3
# 3 << 0 = 3
# 3 << 1 = 6
# 3 << 2 = 12
# 3 << 3 = 24
# 3 << 4 = 48
# 3 << 5 = 96
# 3 << 6 = 192 => greater than dividend 100 => stop here
# Back off to count - 1 because 3 << 6 is too big:
#   result += 1 << (count - 1)  => add 32 to the quotient
#   left   -= 3 << (count - 1)  => subtract 96, then repeat on the remainder
# Time roughly O(log^2 N) (the inner doubling restarts each outer pass),
# Space O(1)
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if dividend == -2**31 and divisor == -1:
return 2**31-1
if dividend == 0:
return 0
        sign = (dividend >= 0 and divisor >= 0) or (dividend < 0 and divisor < 0)
        left, right = abs(dividend), abs(divisor)
        result = 0
        while left >= right:
            count = 0
            while left >= right << count:
                count += 1
            # use count - 1 because right << count overshoots left
            result += 1 << (count - 1)
            left -= right << (count - 1)
return result if sign else -result
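# Quick sanity check (illustrative): truncation is toward zero.
print(Solution().divide(100, 3))  # 33
print(Solution().divide(7, -3))   # -2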
import json
import logging
import re
import asyncio
from typing import Optional
import discord
from discord.ext import commands
import utils
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
log = logging.getLogger("YTEmbedFixer")

client = commands.Bot(command_prefix="yt!",
                      max_messages=5000,
                      description="A bot for fixing what Discord can't.\n",
                      owner_id=389590659335716867,
                      case_insensitive=True)
@client.event
async def on_ready():
log.info('Connected using discord.py {}!'.format(discord.__version__))
log.info('Username: {0.name}, ID: {0.id}'.format(client.user))
log.info('Connected to {} servers.'.format(len(client.guilds)))
    activity = discord.Game("Fixing what Discord can't since 12/5/2019.")
await client.change_presence(status=discord.Status.online, activity=
activity)
log.info('------')
async def fix_yt_embed(message: discord.Message) ->Optional[discord.Embed]:
regex_search_string = (
'(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)')
if len(message.embeds) == 1:
matches = re.findall(regex_search_string, message.content)
if len(matches) > 0:
if message.embeds[0].type == 'link':
await asyncio.sleep(2)
msg_check = discord.utils.get(client.cached_messages, id=
message.id)
if msg_check is not None:
html = await utils.get_video_webpage(matches[0])
video_url = 'https://www.youtube.com/watch?v={}'.format(
matches[0])
video_image = await utils.get_video_image_url(html)
video_title = await utils.get_video_title(html)
author_name = await utils.get_author_name(html)
author_url = await utils.get_author_url(html)
if (video_title is None and video_image is None and
author_name is None and author_url is None):
return None
embed = build_embed(video_url, video_image, video_title,
author_name, author_url)
await send_new_embed(message, embed)
return None
async def send_new_embed(original_msg: discord.Message, embed: discord.Embed):
webhook: discord.Webhook = await utils.get_webhook(client, original_msg
.channel)
try:
if original_msg.guild.me.permissions_in(original_msg.channel
).manage_messages:
await original_msg.delete()
await webhook.send(content=original_msg.content, embed=embed,
username=original_msg.author.display_name, avatar_url=
original_msg.author.avatar_url)
else:
await webhook.send(embed=embed, username=client.user.
display_name, avatar_url=client.user.avatar_url)
except discord.errors.NotFound:
pass
def build_embed(_video_url: str, _video_image_url: Optional[str],
_video_title: Optional[str], _author_name: Optional[str], _author_url:
Optional[str]) ->discord.Embed:
embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,
0, 0))
if _video_image_url is not None:
embed.set_image(url=_video_image_url)
if _author_name is not None:
if _author_url is not None:
embed.set_author(name=_author_name, url=_author_url)
else:
embed.set_author(name=_author_name)
if _video_title is not None:
embed.title = _video_title
embed.url = _video_url
return embed
@client.event
async def on_command_error(ctx, error):
if type(error) == discord.ext.commands.NoPrivateMessage:
await ctx.send('⚠ This command can not be used in DMs!!!')
return
elif type(error) == discord.ext.commands.CommandNotFound:
await ctx.send('⚠ Invalid Command!!!')
return
elif type(error) == discord.ext.commands.MissingPermissions:
await ctx.send(
'⚠ You need the **Manage Messages** permission to use this command'
.format(error.missing_perms))
return
elif type(error) == discord.ext.commands.MissingRequiredArgument:
await ctx.send('⚠ {}'.format(error))
elif type(error) == discord.ext.commands.BadArgument:
await ctx.send('⚠ {}'.format(error))
else:
await ctx.send('⚠ {}'.format(error))
raise error
@client.event
async def on_message(message: discord.Message):
await fix_yt_embed(message)
await client.process_commands(message)
@client.event
async def on_message_edit(before: discord.Message, after: discord.Message):
await fix_yt_embed(after)
@client.command(name='invite', brief='Sends the invite link')
async def send_invite_link(ctx: commands.Context):
link = (
'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176'
.format(client.user.id))
await ctx.send(link)
if __name__ == '__main__':
with open('config.json') as json_data_file:
config = json.load(json_data_file)
client.command_prefix = config['bot_prefix']
client.run(config['token'])
log.info('cleaning Up and shutting down')
<|reserved_special_token_1|>
"""
"""
import json
import logging
import re
import asyncio
from typing import Optional
import discord
from discord.ext import commands
import utils
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
log = logging.getLogger("YTEmbedFixer")
client = commands.Bot(command_prefix="yt!",
max_messages=5000,
description="A bot for fixing what Discord can't.\n",
owner_id=389590659335716867,
case_insensitive=True)
@client.event
async def on_ready():
log.info('Connected using discord.py {}!'.format(discord.__version__))
log.info('Username: {0.name}, ID: {0.id}'.format(client.user))
log.info("Connected to {} servers.".format(len(client.guilds)))
    activity = discord.Game("Fixing what Discord can't since 12/5/2019.")
await client.change_presence(status=discord.Status.online, activity=activity)
log.info('------')
async def fix_yt_embed(message: discord.Message) -> Optional[discord.Embed]:
regex_search_string = r'(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)'
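    # The pattern captures the video id from both URL forms; e.g. (hypothetical ids):
    #   re.findall(regex_search_string, 'https://youtu.be/abc123')        -> ['abc123']
    #   re.findall(regex_search_string, 'www.youtube.com/watch?v=abc123') -> ['abc123']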
if len(message.embeds) == 1:
matches = re.findall(regex_search_string, message.content)
if len(matches) > 0:
            # We have a valid YouTube link with an embed. Check whether it is broken.
            # For now we only look at the first embed and ignore any others.
            if message.embeds[0].type == "link":  # alternatively: description == 'Enjoy the videos and music you love, upload original content, and share it all with friends, family, and the world on YouTube.'
                # We have a broken embed!
                await asyncio.sleep(2)  # Sleep for a bit to let PK delete the message if it is a proxy message
                msg_check = discord.utils.get(client.cached_messages, id=message.id)  # Check whether the message was deleted by PK.
if msg_check is not None:
html = await utils.get_video_webpage(matches[0])
video_url = "https://www.youtube.com/watch?v={}".format(matches[0])
video_image = await utils.get_video_image_url(html)
video_title = await utils.get_video_title(html)
author_name = await utils.get_author_name(html)
author_url = await utils.get_author_url(html)
if video_title is None and video_image is None and author_name is None and author_url is None:
                        # We got no info from the video. Perhaps the video is dead on YouTube or the DOM has totally changed.
return None # Don't post empty embed.
embed = build_embed(video_url, video_image, video_title, author_name, author_url)
await send_new_embed(message, embed)
return None
async def send_new_embed(original_msg: discord.Message, embed: discord.Embed):
webhook: discord.Webhook = await utils.get_webhook(client, original_msg.channel)
try:
if original_msg.guild.me.permissions_in(original_msg.channel).manage_messages:
await original_msg.delete()
await webhook.send(content=original_msg.content, embed=embed, username=original_msg.author.display_name,
avatar_url=original_msg.author.avatar_url)
else:
await webhook.send(embed=embed, username=client.user.display_name,
avatar_url=client.user.avatar_url)
except discord.errors.NotFound:
        pass  # Should never get here because we check permissions before deleting, but just in case: don't post a replacement.
def build_embed(_video_url: str, _video_image_url: Optional[str], _video_title: Optional[str],
_author_name: Optional[str], _author_url: Optional[str]) -> discord.Embed:
embed = discord.Embed(type="video", colour=discord.Colour.from_rgb(255, 0, 0))
if _video_image_url is not None:
embed.set_image(url=_video_image_url)
if _author_name is not None:
if _author_url is not None:
embed.set_author(name=_author_name, url=_author_url)
else:
embed.set_author(name=_author_name)
if _video_title is not None:
embed.title = _video_title
embed.url = _video_url
return embed
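# A hedged sketch of calling build_embed (all argument values are hypothetical;
# any Optional argument may be None, in which case that field is simply skipped):
# embed = build_embed('https://www.youtube.com/watch?v=abc123',
#                     'https://i.ytimg.com/vi/abc123/maxresdefault.jpg',
#                     'Some video title', 'Some channel',
#                     'https://www.youtube.com/channel/somechannel')
# The result is a red, 'video'-type embed whose title links back to the video URL.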
# ---- Command Error Handling ----- #
@client.event
async def on_command_error(ctx, error):
    if isinstance(error, commands.NoPrivateMessage):
        await ctx.send("⚠ This command cannot be used in DMs!!!")
        return
    elif isinstance(error, commands.CommandNotFound):
        await ctx.send("⚠ Invalid Command!!!")
        return
    elif isinstance(error, commands.MissingPermissions):
        await ctx.send("⚠ You need the **Manage Messages** permission to use this command")
        return
    elif isinstance(error, (commands.MissingRequiredArgument, commands.BadArgument)):
        await ctx.send("⚠ {}".format(error))
    else:
        await ctx.send("⚠ {}".format(error))
        raise error
@client.event
async def on_message(message: discord.Message):
await fix_yt_embed(message)
await client.process_commands(message)
@client.event
async def on_message_edit(before: discord.Message, after: discord.Message):
await fix_yt_embed(after)
@client.command(name="invite", brief="Sends the invite link")
async def send_invite_link(ctx: commands.Context):
# link = "https://discordapp.com/oauth2/authorize?client_id=500711320497160199&scope=bot&permissions=536882176"
link = "https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176".format(client.user.id)
await ctx.send(link)
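# A minimal config.json for the entry point below (placeholder values; only the
# two keys actually read from the file are assumed):
# {
#     "bot_prefix": "yt!",
#     "token": "YOUR_BOT_TOKEN"
# }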
if __name__ == '__main__':
with open('config.json') as json_data_file:
config = json.load(json_data_file)
client.command_prefix = config['bot_prefix']
client.run(config['token'])
log.info("cleaning Up and shutting down")
import argparse
import debug.debug as dbg
import helper.helper as hlp
import prep.preprocessor as pre
import sample.sample as s
def main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):
hlp.setup_logging()
# Files as folds?
if number_partitions is None or number_partitions == 0: # Yes
do_concat = False
partitions_from_files = True
early_subsampling = False
late_subsampling = True
else: # No
do_concat = True
partitions_from_files = False
early_subsampling = True
late_subsampling = False
if not do_subsampling:
early_subsampling = late_subsampling = False
X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train, do_subsampling=early_subsampling,
do_concat=do_concat)
clf = s.get_svclassifier(C=C, gamma=gamma)
scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=number_partitions, clf=clf,
files_as_folds=partitions_from_files, do_subsampling=late_subsampling)
evaluation = s.get_eval_report(scores)
hlp.log(scores)
hlp.log(evaluation)
if write_labels:
dbg.write_list_to_dir(dir_train, y_pred, "y_pred.txt")
if do_concat:
dbg.write_list_to_dir(dir_train, y, "y_true.txt")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Print evaluation metrics for cross validating an HSV classifier.")
parser.add_argument("dir_train",
help="Directory containing all feature XMLs and label CSVs for cross validating the "
"classifier. CSVs need to have the same file name as their corresponding XML.")
parser.add_argument("-c", "--C_value", help="Omit the grid search and directly specify a C value.", type=float)
parser.add_argument("-g", "--gamma_value", help="Omit the grid search and directly specify a gamma value.",
type=float)
parser.add_argument("-p", "--number_partitions",
help="Set the number of partitions for cross validation. If omitted, take each file "
"as a partition.", type=int)
parser.add_argument("-s", "--subsampling", help="Subsample majority class", action="store_true")
parser.add_argument("-wl", "--write_labels",
help="Write both true and predicted labels of the eval file(s) to TXT files.",
action="store_true")
args = parser.parse_args()
main(args.dir_train, args.C_value, args.gamma_value, args.number_partitions, args.subsampling, args.write_labels)
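# Example invocation (script and directory names are hypothetical):
#   python crossval_svm.py data/train -c 1.0 -g 0.01 -p 5 -s -wl
# With -p omitted (or 0), each XML/CSV file pair in dir_train is treated as its own fold.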
import time
from sqlalchemy import Column, Unicode, UnicodeText, Integer
from models.base_model import SQLMixin, db, SQLBase
class Messages(SQLMixin, SQLBase):
__tablename__ = 'Messages'
title = Column(Unicode(50), nullable=False)
content = Column(UnicodeText, nullable=False)
sender_id = Column(Integer, nullable=False)
receiver_id = Column(Integer, nullable=False)
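# A minimal usage sketch (illustrative only; `db.session` is an assumption about
# what models.base_model exposes and is not confirmed by this file):
# msg = Messages(title=u'hello', content=u'first message', sender_id=1, receiver_id=2)
# db.session.add(msg)
# db.session.commit()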
array = list(map(int, input().split(" ")))
nums = list(set(array))
occ = []
for num in nums:
    count = array.count(num)
    occ.append((int(num), int(count)))
ans = []
print(occ)
occ.sort(key=lambda x: x[1], reverse=True)
print(occ)
for number, count in occ:
    for i in range(count):
        ans.append(number)
print(ans)
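# Example run (hypothetical stdin; the relative order of equal-count numbers
# depends on set iteration order):
#   input:           4 5 6 5 4 3
#   occ before sort: [(3, 1), (4, 2), (5, 2), (6, 1)]
#   occ after sort:  [(4, 2), (5, 2), (3, 1), (6, 1)]
#   ans:             [4, 4, 5, 5, 3, 6]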
a = 'Hello, World!'
print(a)
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 17:24:25 2015
@author: Damien
"""
import numpy as np
from operator import itemgetter
import itertools
def writeOBJ(vertlist,trilist,filename):
print "number of triangles: " + str(len(trilist))
print "number of vertices: " + str(len(vertlist))
OBJ = open(filename, "w")
OBJ.write('# Created with OBJ writer test version DM\n')
OBJ.write('# COORDINATE_SYSTEM: OGC_DEF PROJCS["Netherlands, Amersfoort RD 2008 datum, New System",GEOGCS["Amersfoort",DATUM["Amersfoort",SPHEROID["Bessel, 1841",6377397.155,299.1528153513275,AUTHORITY["EPSG","7004"]],AUTHORITY["EPSG","6289"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4289"]],PROJECTION["Stereographic"],PARAMETER["latitude_of_origin",52.1561605555556],PARAMETER["central_meridian",5.38763888888889],PARAMETER["scale_factor",0.9999079],PARAMETER["false_easting",155000],PARAMETER["false_northing",463000],UNIT["METER",1],AUTHORITY["EPSG","28992"]]\n')
OBJ.write('# Number of Geometry Coordinates : ' + str(len(vertlist)) + '\n')
OBJ.write('# Number of Texture Coordinates : 0\n')
OBJ.write('# Number of Normal Coordinates : 0\n')
# loop through vertices and write to obj
for vert in vertlist:
OBJ.write("v " + str(vert[0]) + " " + str(vert[1]) + " " + str(vert[2]) + "\n")
OBJ.write('# Number of Elements in set : ' + str(len(trilist)) + '\n')
# loop through triangles and write to obj
for tri in trilist:
OBJ.write("f " + str(tri[0]) + " " + str(tri[1]) + " " + str(tri[2]) + "\n")
OBJ.write('# Total Number of Elements in file: ' + str(len(trilist)) + '\n')
OBJ.write('# EOF')
OBJ.close()
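# Example usage (hypothetical data): write a single triangle to an OBJ file.
# Face indices are 1-based, as required by the OBJ format.
#writeOBJ([(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)], [(1, 2, 3)], "triangle.obj")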
class Vertice:
def __init__(self,x,y,z,vertID):# ,vertID
self.X = float(x)
self.Y = float(y)
self.Z = float(z)
self.ID = int(vertID)
self.string = "(%s , %s , %s)" % (self.X,self.Y,self.Z)
self.neighbourNormals = []
def getVerticePosition(self):
#def getvoxelpos(model,scale,dims,translate,i,j,k): #centroid!
"""self.X = self.scale * ((self.I+.5)/self.dims) + self.translate[0]
self.Y = self.scale * ((self.J+.5)/self.dims) + self.translate[1]
        self.Z = self.scale * ((self.K+.5)/self.dims) + self.translate[2] # is this correct, centroid vs. vertex? """
return(self.X,self.Y,self.Z)
def addNeighbourNormal(self,normalvec):
self.neighbourNormals.append(normalvec)
##############################################################################
# triangle class #
##############################################################################
class Triangle:
def __init__(self,n1,n2,n3): # should node indexes be stored?
self.n1 = n1
self.n2 = n2
self.n3 = n3
self.position = [n1,n2,n3]
#self.fullpos = (n1,n2,n3)
self.neighbourNormals = []
# [n1.neighbourNormals,n2.neighbourNormals,n3.neighbourNormals]
#def addPosition(self,p1,p2,p3):
#self.position = [p1,p2,p3]
#def getNodeIndexes(self):
#return (self.n1,self.n2,self.n3)
#self.id = triID
        #triID += 1 # does this work? # not needed?
def getNormalizedNormalVec(self):
"""# create Vertice for each node
Vert1 = Vertice(self.n1[0],self.n1[1],self.n1[2],self.model,self.dims,self.scale,self.translate)
Vert2 = Vertice(self.n2[0],self.n2[1],self.n2[2],self.model,self.dims,self.scale,self.translate)
Vert3 = Vertice(self.n3[0],self.n3[1],self.n3[2],self.model,self.dims,self.scale,self.translate)
# get real pos for each Vertice, list as TriPos
Vert1Pos = Vert1.getVerticePosition()
Vert2Pos = Vert2.getVerticePosition()
Vert3Pos = Vert3.getVerticePosition()"""
TriPos = self.position
# calc normalized normal vecor for Tri
# get vectors Vert1Vert2 & Vert2Vert3
TriVectors = np.subtract(TriPos[1:],TriPos[:-1])
# get crossproduct of Vert1Vert2 & Vert2Vert3 (= surface normal)
TriNorm = np.cross(TriVectors[0],TriVectors[1])+0.0
# get length of surface normal
length = np.linalg.norm(TriNorm)
# divide each component of surface normal by length (= normalized surface normal)
        NormalizedNormalVec = np.around(TriNorm / length, decimals=5) # rounded, otherwise nearly equal normals differ slightly and equality checks fail
# create string of tuple for segment dict
#SegmDict = str(tuple(NormalizedNormalVec))
return NormalizedNormalVec.tolist()
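# Worked example (hypothetical right triangle in the XY plane):
#   Triangle((0, 0, 0), (1, 0, 0), (0, 1, 0)).getNormalizedNormalVec() -> [0.0, 0.0, 1.0]
# The edge vectors are (1, 0, 0) and (-1, 1, 0); their cross product (0, 0, 1)
# already has unit length, so normalization leaves it unchanged.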
##############################################################################
# get angle between vectors #
##############################################################################
def unit_vector(vector):
""" Returns the unit vector of the vector. """
#print 'unit_vector'
#print vector
#print type(vector)
#npvector = np.array(vector)
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
#print v1, v2
angle = np.arccos(np.dot(v1_u, v2_u))
#print angle
if np.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return np.pi
return angle
def thinNVList(nvlist):
NVset_thinned = sorted(nvlist, key=itemgetter(0,1,2))
for i in range(len(NVset_thinned)-1,0,-1):
vec1 = NVset_thinned[i]
vec2 = NVset_thinned[i-1]
if np.array_equal(vec1,vec2):
del NVset_thinned[i]
else:
continue
#print 'nvset thinned'
#print NVset_thinned
#a = [subset[0] for subset in NVset_thinned]
#print a
#return a
#if len(NVset_thinned) > 0:
return NVset_thinned #[0]
#else:
#return []
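# Example: duplicate normal vectors collapse to one entry per unique vector.
#   thinNVList([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
#   -> [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]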
def testlist2OBJ(testlist,filename):
temp_vertlist = []
temp_trilist = []
vertID = 1
for tri in testlist:
index_tri = []
#print tri.position
for vert in tri.position:
temp_vertlist.append(vert)
index_tri.append(vertID)
vertID+=1
temp_trilist.append(index_tri)
writeOBJ(temp_vertlist,temp_trilist,filename)
#print temp_vertlist,temp_trilist
return
def checkOrtho(NVset):
flatNVset = [NV for subset in NVset for NV in subset]
thinned_flatNVset = thinNVList(flatNVset)
numberNVs = len(thinned_flatNVset)
#print numberNVs
#numberNVs = 3
count = [i for i in range(numberNVs)]
#print count
indexes = list(itertools.combinations(count,2))
for indexpair in indexes:
#print list(indexpair)
pair = [thinned_flatNVset[ids] for ids in indexpair]
#print pair
angle = angle_between(pair[0],pair[1])
if not np.allclose(angle, 1.57079632679):
return False
else:
continue
return True
# extend later? remove non ortho?
#print checkOrtho([[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0]]])
def distance(point1,point2):
nppoint1 = np.array(point1)
nppoint2 = np.array(point2)
dist = np.linalg.norm(nppoint1-nppoint2)
return dist
def testEquilateral(Tri):
dist1 = distance(Tri[0],Tri[1])
dist2 = distance(Tri[1],Tri[2])
dist3 = distance(Tri[2],Tri[0])
if np.allclose(dist1,dist2) and np.allclose(dist2,dist3) and np.allclose(dist3,dist1):
# get center
##print "*********** EQUILAT TRI***************"
##print "p1 = " + str(Tri[0])
#print Tri[0][0], Tri[0][1],Tri[0][2]
##print "p2 = " + str(Tri[1])
#print Tri[1][0], Tri[1][1],Tri[1][2]
##print "p3 = "+ str(Tri[2])
center = [np.sum([Tri[0][0],Tri[1][0],Tri[2][0]])/3.0,np.sum([Tri[0][1],Tri[1][1],Tri[2][1]])/3.0, np.sum([Tri[0][2],Tri[1][2],Tri[2][2]])/3.0 ]
##print "p4 = " + str(center)
return [True, center ]
else:
return [False ]
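# Example (hypothetical): a unit equilateral triangle in the XY plane.
#   testEquilateral([(0, 0, 0), (1, 0, 0), (0.5, 3 ** 0.5 / 2, 0)])
#   -> [True, [0.5, 0.28867..., 0.0]]  (the second element is the centroid)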
def testCornerConcaveConvex(Tri,NVset): # cornerTriTest only run on single corner triangles? or extend?
dist1, dist2,dist3 = distance(Tri[0],Tri[1]),distance(Tri[1],Tri[2]),distance(Tri[2],Tri[0])
stepsize = dist1/10.0 # must be much smaller than polygon
NVset = [subset[0] for subset in NVset]
movedTri = (np.array(Tri) + (np.array(NVset) * stepsize)).tolist()
moved_dist1 = distance(movedTri[0],movedTri[1])
moved_dist2 = distance(movedTri[1],movedTri[2])
moved_dist3 = distance(movedTri[2],movedTri[0])
boolList = [moved_dist1 >dist1,moved_dist2 > dist2,moved_dist3 > dist3]
if sum(boolList) == 3:
return "CONVEX"
elif sum(boolList) == 0:
return "CONCAVE"
elif sum(boolList) == 2: # for convex chamfer, change later
return "CONVEX"
else:
#print "ONVERWACHTE CASE????????????????"
#print sum(boolList)
return "CONVEX/CONCAVE"
def testChamferConcaveConvex(Tri,NVset):
    # TODO: unfinished stub; the distances and stepsize below are computed but not yet used.
    dist1, dist2, dist3 = distance(Tri[0],Tri[1]), distance(Tri[1],Tri[2]), distance(Tri[2],Tri[0])
    stepsize = dist1/10.0 # must be much smaller than polygon
    pass
##############################################################################
# filter triangles based on NV collection #
##############################################################################
def testAngles(triNV,NVset):
#print "********** START testAngles ************"
# GET UNIQUE NUMBER
flatNVset = [NV for subset in NVset for NV in subset]
NVset_thinned = thinNVList(flatNVset)
numberUnique = len(NVset_thinned)
#print "numberUnique = "+ str(numberUnique)
# GET CORNER AND CHAMFER VECTORS
if numberUnique > 2:
original_CornerNVs = [[],[],[]]
original_ChamferNVs = [[],[],[]]
for subset in range(3):
for vec in NVset[subset]:
                if np.allclose([angle_between(triNV, vec)],[0.955316618125]): # arccos(1/sqrt(3)), ~54.74 deg: corner normal; assumption, correct for MC?
                    original_CornerNVs[subset].append(vec)
                elif np.allclose([angle_between(triNV, vec)],[0.785398163397]): # pi/4, 45 deg: chamfer normal
                    original_ChamferNVs[subset].append(vec)
flat_original_CornerNVs = [NV for subset in original_CornerNVs for NV in subset]
unique_CornerNVs = thinNVList(flat_original_CornerNVs)
numberCornerNVs = len(unique_CornerNVs)
flat_original_ChamferNVs = [NV for subset in original_ChamferNVs for NV in subset]
unique_ChamferNVs = thinNVList(flat_original_ChamferNVs)
numberChamferNVs = len(unique_ChamferNVs)
# for subset in original, thin the list?
for i in range(3):
#print i
#print original_CornerNVs
original_CornerNVs[i] = thinNVList(original_CornerNVs[i])
#print original_CornerNVs
original_ChamferNVs[i] = thinNVList(original_ChamferNVs[i])
return numberUnique, numberCornerNVs, numberChamferNVs, original_CornerNVs, original_ChamferNVs
else:
        return numberUnique, 0, 0, [], [] # not clean, but keeps the tuple shape
def detectTriangles(vertlist,trilist,voxelsize):
print "starting triangle detection"
vertDict = {}
vertID = 1
triDict = {}
triID = 1
new_vertlist = []
#print len(vertlist)
    # CREATE VERT DICT
# for tri in tri list, get normal vec
# for vert in tri, check in vertdict, if there: add normal vec, if not there, add it, vertid +1 add normal vec
for index in range(len(trilist)):
tri = trilist[index]
node1,node2,node3 = tri[0], tri[1], tri[2]
updateTri = []
TRI = Triangle(vertlist[node1-1],vertlist[node2-1],vertlist[node3-1])
NNV = TRI.getNormalizedNormalVec()
for node in tri:
Node = vertlist[node-1]
VERT = Vertice(Node[0],Node[1],Node[2],vertID)
# if not in dict: attach NNV, add vert to dict
if VERT.string not in vertDict:
#print "**********CHECK******"
#print NNV
VERT.addNeighbourNormal(NNV)
vertDict[VERT.string] = VERT
#print vertDict[VERT.string].neighbourNormals
updateTri.append(vertID)
new_vertlist.append(VERT.getVerticePosition())
vertID +=1
# if in dict, attach NNV to existing vert
else:
#print "**********CHECK******"
#print vertDict[VERT.string].neighbourNormals
vertDict[VERT.string].addNeighbourNormal(NNV)
#print vertDict[VERT.string].neighbourNormals
updateTri.append(vertDict[VERT.string].ID)
trilist[index] = updateTri
# CREATE TRI DICT
# get all NNVs from single points in triangle list
#for index in range(0,1):
for index in range(len(trilist)):
TRI = Triangle(new_vertlist[trilist[index][0]-1],new_vertlist[trilist[index][1]-1],new_vertlist[trilist[index][2]-1]) #-1 needed?
for node in trilist[index]:
#print node
dict_string = "(%s , %s , %s)" % (new_vertlist[node-1][0],new_vertlist[node-1][1],new_vertlist[node-1][2]) #-1 needed!
#print dict_string
vertObject = vertDict[dict_string]
#print vertObject.neighbourNormals
TRI.neighbourNormals.append(vertObject.neighbourNormals)
triDict[index+1] = TRI
convexCornerList = []
concaveCornerList = []
convexChamferList = []
concaveChamferList = []
concaveConvexCase1List = []
concaveConvexCase2List = []
# SHARPENING
sharpenedTriList = []
# order not important right?
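    # Classification per triangle, driven by testAngles:
    #   fewer than 3 distinct neighbour normals -> flat, copied through
    #   3 corner-angle normals -> corner (equilateral gives the convex/concave
    #                             cases, otherwise the CASE1 mixed corner)
    #   2 corner-angle normals -> CASE2 mixed concave/convex corner
    #   2 chamfer-angle normals (orthogonal) -> chamfer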
for tri in triDict.values():
triNV = tri.getNormalizedNormalVec()
angleResults = testAngles(tri.getNormalizedNormalVec(),tri.neighbourNormals)
# FLAT TRI
if angleResults[0] < 3:
sharpenedTriList.append(tri)
# CORNER TRI
elif angleResults[1] == 3:
equilateralTest = testEquilateral(tri.position)
if equilateralTest[0]:
directionVec = [-1 if val < 0 else 1 for val in triNV]
moveVec = np.array(directionVec) * voxelsize/3.0
convexConcavetest = testCornerConcaveConvex(tri.position,angleResults[3])
if convexConcavetest == "CONCAVE":
# DETECTION
#print "CONCAVE"
concaveCornerList.append(tri)
# SHARPENING
newpoint = np.around(np.array(equilateralTest[1]) - np.array(moveVec), decimals=5)
moveVecs = [(-np.multiply(item[0],voxelsize)/2.0) for item in angleResults[3]]
n15 = np.add(tri.n1,moveVecs[1])
n25 = np.add(tri.n2,moveVecs[2])
n35 = np.add(tri.n3,moveVecs[0])
TRI1 = Triangle(tri.n1,n15,newpoint)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(n15,tri.n2,newpoint)
sharpenedTriList.append(TRI2)
TRI3 = Triangle(tri.n2,n25,newpoint)
sharpenedTriList.append(TRI3)
TRI4 = Triangle(n25,tri.n3,newpoint)
sharpenedTriList.append(TRI4)
TRI5 = Triangle(tri.n3,n35,newpoint)
sharpenedTriList.append(TRI5)
TRI6 = Triangle(n35,tri.n1,newpoint)
sharpenedTriList.append(TRI6)
elif convexConcavetest == "CONVEX":
# DETECTION
#print "CONVEX"
convexCornerList.append(tri)
# SHARPENING
newpoint = np.around(np.array(equilateralTest[1]) + np.array(moveVec), decimals=5)
moveVecs = [(np.multiply(item[0],voxelsize)/2.0) for item in angleResults[3]]
n15 = np.add(tri.n1,moveVecs[1])
n25 = np.add(tri.n2,moveVecs[2])
n35 = np.add(tri.n3,moveVecs[0])
TRI1 = Triangle(tri.n1,n15,newpoint)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(n15,tri.n2,newpoint)
sharpenedTriList.append(TRI2)
TRI3 = Triangle(tri.n2,n25,newpoint)
sharpenedTriList.append(TRI3)
TRI4 = Triangle(n25,tri.n3,newpoint)
sharpenedTriList.append(TRI4)
TRI5 = Triangle(tri.n3,n35,newpoint)
sharpenedTriList.append(TRI5)
TRI6 = Triangle(n35,tri.n1,newpoint)
sharpenedTriList.append(TRI6)
"""else:
# change nothing
sharpenedTriList.append(tri)"""
else:
print '***********************CASE1******************* '
# DETECTION
concaveConvexCase1List.append(tri)
# SHARPENING
dist1,dist2,dist3 = distance(tri.n1,tri.n2),distance(tri.n2,tri.n3),distance(tri.n3,tri.n1)
#print dist1, dist2,dist3
if np.isclose(dist3,dist1):
middleIndex,middlePos = 0,tri.n1
elif np.isclose(dist1,dist2):
middleIndex,middlePos = 1,tri.n2
elif np.isclose(dist2,dist3):
middleIndex,middlePos = 2,tri.n3
sideIndexes = [0,1,2]#.remove(singleIndex)
sideIndexes.remove(middleIndex)
moveVecs = [(np.multiply(item[0],voxelsize)/2.0) for item in angleResults[3]]
#print moveVecs
triList = [tri.n1,tri.n2,tri.n3]
moveCheck = np.add(moveVecs,triList)
if np.allclose(moveCheck[sideIndexes[0]],moveCheck[middleIndex]):
print moveCheck[sideIndexes[0]]
print moveCheck[middleIndex]
sideOutterIndex,sideInnerIndex = sideIndexes[0],sideIndexes[1]
elif np.allclose(moveCheck[sideIndexes[1]],moveCheck[middleIndex]):
print moveCheck[sideIndexes[1]]
print moveCheck[middleIndex]
sideOutterIndex,sideInnerIndex = sideIndexes[1],sideIndexes[0]
else:
print "OOPS"
                # sideOutter / sideInner check may be wrong?
singleFirst = np.add(triList[sideOutterIndex],np.subtract(triList[sideInnerIndex],middlePos))
singleSecond = np.subtract(triList[sideOutterIndex],moveVecs[middleIndex])
singleThird = np.add(singleFirst,moveVecs[sideOutterIndex])
TRI1 = Triangle(singleFirst,singleSecond,middlePos)
sharpenedTriList.append(TRI1)
#TRI2 = Triangle(singleFirst,singleSecond,triList[sideOutterIndex])
#sharpenedTriList.append(TRI2)
TRI3 = Triangle(middlePos,singleThird,singleFirst)
sharpenedTriList.append(TRI3)
elif angleResults[1] == 2:
# DETECTION
concaveConvexCase2List.append(tri)
# SHARPENING
#print "****************** CASE 2 ****************************"
vecList = [[1.0, 0.0, 0.0],[0.0, 1.0, 0.0],[0.0, 0.0, 1.0],[-1.0, 0.0, 0.0],[0.0, -1.0, 0.0],[0.0, 0.0, -1.0]]
for cornerVec in vecList:
# write above in better way, unnecessary searches?)
if angleResults[3].count([cornerVec]) == 2:
                    double = cornerVec # not actually needed
elif angleResults[3].count([cornerVec]) == 1:
single = cornerVec
singleIndex = angleResults[3].index([single])
#print singleIndex
triList = [tri.n1,tri.n2,tri.n3]
doubleIndexes = [0,1,2]#.remove(singleIndex)
#print doubleIndexes
doubleIndexes.remove(singleIndex)
#print doubleIndexes
# check distances from double to single
            if distance(triList[doubleIndexes[0]],triList[singleIndex]) > distance(triList[doubleIndexes[1]],triList[singleIndex]):
                doubleFIndex, doubleCIndex = doubleIndexes[0],doubleIndexes[1]
            else: # the two distances differ by construction; else avoids an unassigned doubleFIndex
                doubleFIndex,doubleCIndex = doubleIndexes[1],doubleIndexes[0]
# triangle vertices defined
singlePos = triList[singleIndex]
doubleFPos = triList[doubleFIndex]
doubleCPos = triList[doubleCIndex]
# define 3 new vertices
singleFirst = np.add(doubleCPos,doubleFPos)/2.0
#print triList
empty = [0.0,0.0,0.0]
vecList = [ ]
moveVec = np.subtract(singlePos,singleFirst)
for i in range(len(moveVec)):
if moveVec[i] != 0:
temp = np.copy(empty)
temp[i] = moveVec[i]
vecList.append(temp)
if np.allclose(distance(singleFirst,np.add(singleFirst,vecList[1])),distance(singlePos,np.add(singleFirst,vecList[1]))):
singleSecond,singleThird = np.add(singleFirst,vecList[0]),np.add(singleFirst,vecList[1])
elif np.allclose(distance(singleFirst,np.add(singleFirst,vecList[0])),distance(singlePos,np.add(singleFirst,vecList[0]))):
singleSecond,singleThird = np.add(singleFirst,vecList[1]),np.add(singleFirst,vecList[0])
# write sharpened triangles
TRI1 = Triangle(singleFirst,doubleCPos,singleSecond)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(singleFirst,singleSecond,singlePos)
sharpenedTriList.append(TRI2)
TRI3 = Triangle(singleFirst,singlePos,singleThird)
sharpenedTriList.append(TRI3)
singleFourth = np.add(singleFirst,np.subtract(singleSecond,doubleCPos,))
TRI4 = Triangle(singleFirst,doubleFPos,singleFourth)
sharpenedTriList.append(TRI4)
"""moveVecs = [(np.multiply(item[0],voxelsize)/2.0) for item in angleResults[3]]
#print moveVecs
singleSecond = np.add(singleFirst,moveVecs[doubleFIndex])
singleThird = np.subtract(singlePos,moveVecs[doubleFIndex])
# write sharpened triangles
TRI1 = Triangle(doubleCPos,singleFirst,singleThird)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(singleFirst,singleSecond,singleThird)
sharpenedTriList.append(TRI2)
TRI3 = Triangle(singleThird,singleSecond,singlePos)
sharpenedTriList.append(TRI3)"""
# CHAMFER TRI
elif angleResults[2] == 2:
if checkOrtho(angleResults[4]):
convexConcavetest = testCornerConcaveConvex(tri.position,angleResults[4])
if convexConcavetest == "CONCAVE":
# DETECTION
convexChamferList.append(tri)
# SHARPENING
vecList = [[1.0, 0.0, 0.0],[0.0, 1.0, 0.0],[0.0, 0.0, 1.0],[-1.0, 0.0, 0.0],[0.0, -1.0, 0.0],[0.0, 0.0, -1.0]]
for chamferVec in vecList:
# write above in better way, unnecessary searches?)
if angleResults[4].count([chamferVec]) == 2:
                            double = chamferVec # not actually needed
elif angleResults[4].count([chamferVec]) == 1:
single = chamferVec
singleIndex = angleResults[4].index([single]) # only finds first, allowed in case of single
triList = [tri.n1,tri.n2,tri.n3]
doubleIndexes = [0,1,2]#.remove(singleIndex)
doubleIndexes.remove(singleIndex)
# check distances from double to single
                    if distance(triList[doubleIndexes[0]],triList[singleIndex]) > distance(triList[doubleIndexes[1]],triList[singleIndex]):
                        doubleFIndex, doubleCIndex = doubleIndexes[0],doubleIndexes[1]
                    else: # distances differ by construction; else avoids an unassigned doubleFIndex
                        doubleFIndex,doubleCIndex = doubleIndexes[1],doubleIndexes[0]
# triangle vertices defined
singlePos = triList[singleIndex]
doubleFPos = triList[doubleFIndex]
doubleCPos = triList[doubleCIndex]
# construct next 2 vertices
moveVec = [(-np.multiply(item,voxelsize)/2.0) for item in double]
singleFirst = np.add(singlePos,moveVec)
singleSecond = np.add(singleFirst,np.subtract(doubleFPos,doubleCPos))
# write sharpened triangles
TRI1 = Triangle(singleFirst,doubleCPos,doubleFPos)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(singleFirst,doubleFPos,singleSecond)
sharpenedTriList.append(TRI2)
elif convexConcavetest == "CONVEX":
concaveChamferList.append(tri)
#print "*************** CONVEX CHAMFER *****************"
#print tri
#print angleResults[4]
vecList = [[1.0, 0.0, 0.0],[0.0, 1.0, 0.0],[0.0, 0.0, 1.0],[-1.0, 0.0, 0.0],[0.0, -1.0, 0.0],[0.0, 0.0, -1.0]]
for chamferVec in vecList:
# write above in better way, unnecessary searches?)
if angleResults[4].count([chamferVec]) == 2:
                            double = chamferVec # not actually needed
elif angleResults[4].count([chamferVec]) == 1:
single = chamferVec
singleIndex = angleResults[4].index([single]) # only finds first, allowed in case of single
triList = [tri.n1,tri.n2,tri.n3]
doubleIndexes = [0,1,2]#.remove(singleIndex)
doubleIndexes.remove(singleIndex)
# check distances from double to single
                    if distance(triList[doubleIndexes[0]],triList[singleIndex]) > distance(triList[doubleIndexes[1]],triList[singleIndex]):
                        doubleFIndex, doubleCIndex = doubleIndexes[0],doubleIndexes[1]
                    else: # distances differ by construction; else avoids an unassigned doubleFIndex
                        doubleFIndex,doubleCIndex = doubleIndexes[1],doubleIndexes[0]
# triangle vertices defined
singlePos = triList[singleIndex]
doubleFPos = triList[doubleFIndex]
doubleCPos = triList[doubleCIndex]
# construct next 2 vertices
moveVec = [(np.multiply(item,voxelsize)/2.0) for item in double]
singleFirst = np.add(singlePos,moveVec)
singleSecond = np.add(singleFirst,np.subtract(doubleFPos,doubleCPos))
# write sharpened triangles
TRI1 = Triangle(singleFirst,doubleCPos,doubleFPos)
sharpenedTriList.append(TRI1)
TRI2 = Triangle(singleFirst,doubleFPos,singleSecond)
sharpenedTriList.append(TRI2)
else:
# change nothing
sharpenedTriList.append(tri)
else:
# change nothing
sharpenedTriList.append(tri)
else:
# change nothing
#print tri
sharpenedTriList.append(tri)
# WRITE SHARPENED TRIANGLES TO VERT AND TRI LIST
sharp_vertlist = []
sharp_trilist = []
vertID = 1
for tri in sharpenedTriList:
index_tri = []
#print tri.position
for vert in tri.position:
sharp_vertlist.append(vert)
index_tri.append(vertID)
vertID+=1
sharp_trilist.append(index_tri)
"""
testlist2OBJ(convexCornerList, "convexCornerList.obj")
testlist2OBJ(concaveCornerList, "concaveCornerList.obj")
testlist2OBJ(concaveConvexCase1List, "concaveConvexCase1List.obj")
testlist2OBJ(concaveConvexCase2List, "concaveConvexCase2List.obj")
testlist2OBJ(convexChamferList, "convexChamferList.obj")
testlist2OBJ(concaveChamferList, "concaveChamferList.obj")
testlist2OBJ(sharpenedTriList, "sharpenedTriList.obj")"""
return sharp_vertlist, sharp_trilist
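# Typical call chain (a hypothetical driver; the output filename is illustrative):
# sharp_verts, sharp_tris = detectTriangles(vertlist, trilist, voxelsize)
# writeOBJ(sharp_verts, sharp_tris, "sharpened.obj")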
#
# -*- coding: utf-8 -*-
# Copyright 2019 Fortinet, Inc.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The fortios firewall monitor class
It is in this file the runtime information is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from copy import deepcopy
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.fortios.argspec.firewall.firewall import FirewallArgs
FACT_SYSTEM_SUBSETS = frozenset([
'system_current-admins_select',
'system_firmware_select',
'system_fortimanager_status',
'system_ha-checksums_select',
'system_interface_select',
'system_status_select',
'system_time_select',
])
class FirewallFacts(object):
""" The fortios firewall fact class
"""
def __init__(self, module, fos, uri=None, subspec='config', options='options'):
self._module = module
self._fos = fos
self._uri = uri
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for firewall
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
fos = self._fos if self._fos else connection
vdom = self._module.params['vdom']
ansible_facts['ansible_network_resources'].pop('system', None)
facts = {}
if self._uri.startswith(tuple(FACT_SYSTEM_SUBSETS)):
resp = fos.monitor('system', self._uri[len('system_'):].replace('_', '/'), vdom=vdom)
facts.update({self._uri: resp})
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
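# A minimal standalone sketch (an assumption: not part of the original module)
# showing how populate_facts maps a fact-subset URI onto the fos.monitor() path:
if __name__ == '__main__':
    uri = 'system_interface_select'
    assert uri.startswith(tuple(FACT_SYSTEM_SUBSETS))
    print('system', uri[len('system_'):].replace('_', '/'))  # -> system interface/select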
import numpy,math,random
from scipy.io.wavfile import write
notes=[('c',32.7),('c#',34.65),('d',36.71),('d#',38.89),('e',41.2),('f',43.65),
('f#',46.25),('g',49),('g#',51.91),('a',55),('a#',58.27),('b',61.47)]
#notes={'c':32.7,'c#':34.65,'d':36.71,'d#':38.89,'e':41.2,'f':43.65,'f#':46.25,
# 'g':49,'g#':51.91,'a':55,'a#':58.27,'b':61.47}
tempo=80
beatLen=1/(tempo/60)
noteTypes={'q':1,'h':2,'dh':3,'w':4,'e':.5,'s':.25,}
def make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16):
bytelist = []
TwoPiDivSamplerate = 2*math.pi/samplerate
increment = TwoPiDivSamplerate * freq
incadd = phase*increment
for i in range(int(samplerate*time)):
        # reflect the running phase if it ever leaves the sample range; with
        # the short, low-frequency tones generated below this guard never fires
        if incadd > (2**(bitspersample - 1) - 1):
            incadd = (2**(bitspersample - 1) - 1) - (incadd - (2**(bitspersample - 1) - 1))
        elif incadd < -(2**(bitspersample - 1) - 1):
            incadd = -(2**(bitspersample - 1) - 1) + (-(2**(bitspersample - 1) - 1) - incadd)
        # gaussian amplitude envelope centred on the middle of the tone
        f=math.e**(-((i-int(samplerate*time)/2)**2)/(2*(int(samplerate*time)/4)**2))
bytelist.append(int(round(f*amp*(2**(bitspersample - 1) - 1)*math.sin(incadd))))
incadd += increment
return bytelist
data = []
for octave in range(2,4):
for note in notes:
f=note[1]
data+=make_wave(f*2**octave,.3)
scaled = numpy.int16(data/numpy.max(numpy.abs(data)) * 32767)
print(scaled)
write('test0.wav', 44100, scaled)
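# tempo, beatLen and noteTypes above are defined but never used; a sketch of
# how they could drive a melody (the score format here is an assumption):
def render_melody(score, octave=3):
    freq = dict(notes)
    out = []
    for name, ntype in score:
        out += make_wave(freq[name] * 2 ** octave, noteTypes[ntype] * beatLen)
    return out

# Example (hypothetical): data = render_melody([('c', 'q'), ('e', 'q'), ('g', 'h')])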
|
flexible
|
{
"blob_id": "2ad1b44027b72499c1961f2d2b1c12c356c63d2b",
"index": 5350,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16\n ):\n bytelist = []\n TwoPiDivSamplerate = 2 * math.pi / samplerate\n increment = TwoPiDivSamplerate * freq\n incadd = phase * increment\n count = 0\n mid = None\n for i in range(int(samplerate * time)):\n if incadd > 2 ** (bitspersample - 1) - 1:\n incadd = 2 ** (bitspersample - 1) - 1 - (incadd - (2 ** (\n bitspersample - 1) - 1))\n elif incadd < -(2 ** (bitspersample - 1) - 1):\n incadd = -(2 ** (bitspersample - 1) - 1) + (-(2 ** (\n bitspersample - 1) - 1) - incadd)\n f = math.e ** (-(i - int(samplerate * time) / 2) ** 2 / (2 * (int(\n samplerate * time) / 4) ** 2))\n bytelist.append(int(round(f * amp * (2 ** (bitspersample - 1) - 1) *\n math.sin(incadd))))\n incadd += increment\n return bytelist\n\n\n<mask token>\nfor octave in range(2, 4):\n for note in notes:\n f = note[1]\n data += make_wave(f * 2 ** octave, 0.3)\n<mask token>\nprint(scaled)\nwrite('test0.wav', 44100, scaled)\n",
"step-3": "<mask token>\nnotes = [('c', 32.7), ('c#', 34.65), ('d', 36.71), ('d#', 38.89), ('e', \n 41.2), ('f', 43.65), ('f#', 46.25), ('g', 49), ('g#', 51.91), ('a', 55),\n ('a#', 58.27), ('b', 61.47)]\ntempo = 80\nbeatLen = 1 / (tempo / 60)\nnoteTypes = {'q': 1, 'h': 2, 'dh': 3, 'w': 4, 'e': 0.5, 's': 0.25}\n\n\ndef make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16\n ):\n bytelist = []\n TwoPiDivSamplerate = 2 * math.pi / samplerate\n increment = TwoPiDivSamplerate * freq\n incadd = phase * increment\n count = 0\n mid = None\n for i in range(int(samplerate * time)):\n if incadd > 2 ** (bitspersample - 1) - 1:\n incadd = 2 ** (bitspersample - 1) - 1 - (incadd - (2 ** (\n bitspersample - 1) - 1))\n elif incadd < -(2 ** (bitspersample - 1) - 1):\n incadd = -(2 ** (bitspersample - 1) - 1) + (-(2 ** (\n bitspersample - 1) - 1) - incadd)\n f = math.e ** (-(i - int(samplerate * time) / 2) ** 2 / (2 * (int(\n samplerate * time) / 4) ** 2))\n bytelist.append(int(round(f * amp * (2 ** (bitspersample - 1) - 1) *\n math.sin(incadd))))\n incadd += increment\n return bytelist\n\n\ndata = []\nfor octave in range(2, 4):\n for note in notes:\n f = note[1]\n data += make_wave(f * 2 ** octave, 0.3)\nscaled = numpy.int16(data / numpy.max(numpy.abs(data)) * 32767)\nprint(scaled)\nwrite('test0.wav', 44100, scaled)\n",
"step-4": "import numpy, math, random\nfrom scipy.io.wavfile import write\nnotes = [('c', 32.7), ('c#', 34.65), ('d', 36.71), ('d#', 38.89), ('e', \n 41.2), ('f', 43.65), ('f#', 46.25), ('g', 49), ('g#', 51.91), ('a', 55),\n ('a#', 58.27), ('b', 61.47)]\ntempo = 80\nbeatLen = 1 / (tempo / 60)\nnoteTypes = {'q': 1, 'h': 2, 'dh': 3, 'w': 4, 'e': 0.5, 's': 0.25}\n\n\ndef make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16\n ):\n bytelist = []\n TwoPiDivSamplerate = 2 * math.pi / samplerate\n increment = TwoPiDivSamplerate * freq\n incadd = phase * increment\n count = 0\n mid = None\n for i in range(int(samplerate * time)):\n if incadd > 2 ** (bitspersample - 1) - 1:\n incadd = 2 ** (bitspersample - 1) - 1 - (incadd - (2 ** (\n bitspersample - 1) - 1))\n elif incadd < -(2 ** (bitspersample - 1) - 1):\n incadd = -(2 ** (bitspersample - 1) - 1) + (-(2 ** (\n bitspersample - 1) - 1) - incadd)\n f = math.e ** (-(i - int(samplerate * time) / 2) ** 2 / (2 * (int(\n samplerate * time) / 4) ** 2))\n bytelist.append(int(round(f * amp * (2 ** (bitspersample - 1) - 1) *\n math.sin(incadd))))\n incadd += increment\n return bytelist\n\n\ndata = []\nfor octave in range(2, 4):\n for note in notes:\n f = note[1]\n data += make_wave(f * 2 ** octave, 0.3)\nscaled = numpy.int16(data / numpy.max(numpy.abs(data)) * 32767)\nprint(scaled)\nwrite('test0.wav', 44100, scaled)\n",
"step-5": "import numpy,math,random\nfrom scipy.io.wavfile import write\n\nnotes=[('c',32.7),('c#',34.65),('d',36.71),('d#',38.89),('e',41.2),('f',43.65),\n ('f#',46.25),('g',49),('g#',51.91),('a',55),('a#',58.27),('b',61.47)]\n#notes={'c':32.7,'c#':34.65,'d':36.71,'d#':38.89,'e':41.2,'f':43.65,'f#':46.25,\n # 'g':49,'g#':51.91,'a':55,'a#':58.27,'b':61.47}\ntempo=80\nbeatLen=1/(tempo/60)\nnoteTypes={'q':1,'h':2,'dh':3,'w':4,'e':.5,'s':.25,}\n\ndef make_wave(freq, time=1, amp=1, phase=0, samplerate=44100, bitspersample=16):\n bytelist = []\n TwoPiDivSamplerate = 2*math.pi/samplerate\n increment = TwoPiDivSamplerate * freq\n incadd = phase*increment\n count=0\n mid=None\n for i in range(int(samplerate*time)):\n if incadd > (2**(bitspersample - 1) - 1):\n incadd = (2**(bitspersample - 1) - 1) - (incadd - (2**(bitspersample - 1) - 1))\n elif incadd < -(2**(bitspersample - 1) - 1):\n incadd = -(2**(bitspersample - 1) - 1) + (-(2**(bitspersample - 1) - 1) - incadd)\n f=math.e**(-((i-int(samplerate*time)/2)**2)/(2*(int(samplerate*time)/4)**2))\n bytelist.append(int(round(f*amp*(2**(bitspersample - 1) - 1)*math.sin(incadd))))\n incadd += increment\n return bytelist\n\n\ndata = []\nfor octave in range(2,4):\n for note in notes:\n f=note[1]\n data+=make_wave(f*2**octave,.3)\n\n\n\n\n\nscaled = numpy.int16(data/numpy.max(numpy.abs(data)) * 32767)\nprint(scaled)\nwrite('test0.wav', 44100, scaled)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
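In the wave-generation record above, the factor f = e**(-(i - N/2)**2 / (2*(N/4)**2)) with N = samplerate*time is a Gaussian window peaking at the middle of the note with sigma = N/4, so each tone fades in and out instead of clicking at note boundaries. A minimal vectorized sketch of the same envelope, assuming numpy is available (N and the 440 Hz pitch are illustrative):

import numpy as np

N = 44100 // 10  # samples in one short note (illustrative)
i = np.arange(N)
# Gaussian window centered on the note, sigma = N/4, as in make_wave
envelope = np.exp(-(i - N / 2) ** 2 / (2 * (N / 4) ** 2))
tone = envelope * np.sin(2 * np.pi * 440 * i / 44100)  # enveloped 440 Hz sine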
'''
quarter = 0.25
dime = 0.10
nickel = 0.05
penny = 0.01
'''
#def poschg(dollar_amount,number):
|
normal
|
{
"blob_id": "0deec9058c6f7b77ba4fa3bfc0269c8596ce9612",
"index": 1215,
"step-1": "<mask token>\n",
"step-2": "'''\nquarter = 0.25\ndime = 0.10\nnickel = 0.05\npenny = 0.01\n'''\n\n#def poschg(dollar_amount,number):\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
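The record above ends with a commented-out stub, #def poschg(dollar_amount,number):, which suggests a coin-change calculation over the quoted denominations. A hypothetical completion under that assumption (the simplified signature and the greedy logic are not part of the original snippet):

def poschg(dollar_amount):
    """Break dollar_amount into quarters, dimes, nickels and pennies."""
    cents = round(dollar_amount * 100)  # work in integer cents to avoid float error
    change = {}
    for name, value in (('quarter', 25), ('dime', 10), ('nickel', 5), ('penny', 1)):
        change[name], cents = divmod(cents, value)
    return change

print(poschg(0.87))  # {'quarter': 3, 'dime': 1, 'nickel': 0, 'penny': 2}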
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('config.yml') as f:
content = yaml.load(f)
<|reserved_special_token_0|>
for k, v in response.items():
if k == 'jobDefinitions':
new_dict = v[0]['containerProperties']
print(content.items())
print(new_dict2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
new_dict = {}
count = 0
new_dict2 = {}
mybatch = boto3.client('batch')
with open('config.yml') as f:
content = yaml.load(f)
response = mybatch.describe_job_definitions(jobDefinitions=[
'axiom-staging-abcfinewine:1'], status='ACTIVE')
for k, v in response.items():
if k == 'jobDefinitions':
new_dict = v[0]['containerProperties']
print(content.items())
print(new_dict2)
<|reserved_special_token_1|>
import boto3
import pprint
import yaml
new_dict = {}
count = 0
new_dict2 = {}
mybatch = boto3.client('batch')
with open('config.yml') as f:
content = yaml.load(f)
response = mybatch.describe_job_definitions(jobDefinitions=[
'axiom-staging-abcfinewine:1'], status='ACTIVE')
for k, v in response.items():
if k == 'jobDefinitions':
new_dict = v[0]['containerProperties']
print(content.items())
print(new_dict2)
<|reserved_special_token_1|>
import boto3
import pprint
import yaml
#initialize empty dictionary to store values
new_dict = {}
count = 0
new_dict2 = {}
# dev = boto3.session.Session(profile_name='shipt')
mybatch = boto3.client('batch')
#load config properties
with open('config.yml') as f:
content = yaml.load(f)
# pprint.pprint(content) #to print config properties in file
#get current job definition
response = mybatch.describe_job_definitions(
jobDefinitions = [
'axiom-staging-abcfinewine:1'
# 'axiom-staging-costco:1'
],
status='ACTIVE'
)
# print(type(response))
for k, v in response.items():
if k == 'jobDefinitions':
# pprint.pprint(v) #to print container properties
# pprint.pprint(v[0]['containerProperties'])
new_dict = v[0]['containerProperties']
#check if config properties match with current job definition properties
# for key in new_dict.keys():
# if key in content.keys():
# count = count + 1
# if content[key] == new_dict[key]:
# new_dict2[key] == content[key]
print(content.items())
# new_dict2 = dict(content.items() & new_dict.items())
print(new_dict2)
# if v == new_dict[k]:
# # print('woooh00!')
# print(content[k])
# print(v)
# print(new_dict[k])
# for k,v in new_dict.items():
# print(v)
# if content != new_dict:
# print('\n\n\n\twooohooo!')
# print(response)
# pp = pprint.PrettyPrinter(indent = 4)
# pp.pprint(response)
|
flexible
|
{
"blob_id": "3ba9ff00b0d6a2006c714a9818c8b561d884e252",
"index": 2302,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('config.yml') as f:\n content = yaml.load(f)\n<mask token>\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n",
"step-3": "<mask token>\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\nmybatch = boto3.client('batch')\nwith open('config.yml') as f:\n content = yaml.load(f)\nresponse = mybatch.describe_job_definitions(jobDefinitions=[\n 'axiom-staging-abcfinewine:1'], status='ACTIVE')\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n",
"step-4": "import boto3\nimport pprint\nimport yaml\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\nmybatch = boto3.client('batch')\nwith open('config.yml') as f:\n content = yaml.load(f)\nresponse = mybatch.describe_job_definitions(jobDefinitions=[\n 'axiom-staging-abcfinewine:1'], status='ACTIVE')\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n",
"step-5": "import boto3\nimport pprint\nimport yaml\n\n#initialize empty dictionary to store values\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\n\n# dev = boto3.session.Session(profile_name='shipt')\nmybatch = boto3.client('batch')\n\n#load config properties\nwith open('config.yml') as f:\n content = yaml.load(f)\n\n# pprint.pprint(content) #to print config properties in file\n\n#get current job definition\nresponse = mybatch.describe_job_definitions(\n jobDefinitions = [\n 'axiom-staging-abcfinewine:1'\n # 'axiom-staging-costco:1'\n ],\n status='ACTIVE'\n)\n\n# print(type(response))\n\nfor k, v in response.items():\n if k == 'jobDefinitions':\n # pprint.pprint(v) #to print container properties\n # pprint.pprint(v[0]['containerProperties'])\n new_dict = v[0]['containerProperties']\n\n\n#check if config properties match with current job definition properties\n # for key in new_dict.keys():\n # if key in content.keys():\n # count = count + 1\n # if content[key] == new_dict[key]:\n # new_dict2[key] == content[key]\n\nprint(content.items())\n# new_dict2 = dict(content.items() & new_dict.items())\n\nprint(new_dict2)\n # if v == new_dict[k]:\n # # print('woooh00!')\n # print(content[k])\n # print(v)\n # print(new_dict[k])\n\n# for k,v in new_dict.items():\n# print(v)\n# if content != new_dict:\n# print('\\n\\n\\n\\twooohooo!')\n\n\n# print(response)\n# pp = pprint.PrettyPrinter(indent = 4)\n# pp.pprint(response)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
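The script above loads config.yml and fetches the containerProperties of an AWS Batch job definition, and its commented-out block hints at diffing the two dictionaries. A minimal sketch of that comparison, assuming the same file layout (the diff helper is not in the original; yaml.safe_load also avoids the Loader warning that bare yaml.load emits in PyYAML 5+):

import yaml

with open('config.yml') as f:
    content = yaml.safe_load(f)

def diff_props(desired, current):
    """Return keys whose values differ between config and job definition."""
    return {k: (desired[k], current.get(k))
            for k in desired if current.get(k) != desired[k]}

# new_dict comes from describe_job_definitions exactly as in the snippet:
# changed = diff_props(content, new_dict)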
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='userProfile', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('kassa', models.FloatField(null=True)),
('sirket', models.CharField(max_length=50, null=True)), ('telefon',
models.CharField(max_length=50, null=True)), ('adress', models.
CharField(max_length=200, null=True)), ('profilsekli', models.
ImageField(null=True, upload_to='')), ('dogumtarixi', models.
DateField(null=True)), ('user', models.OneToOneField(null=True,
on_delete=django.db.models.deletion.CASCADE, to=settings.
AUTH_USER_MODEL))])]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='userProfile', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('kassa', models.FloatField(null=True)),
('sirket', models.CharField(max_length=50, null=True)), ('telefon',
models.CharField(max_length=50, null=True)), ('adress', models.
CharField(max_length=200, null=True)), ('profilsekli', models.
ImageField(null=True, upload_to='')), ('dogumtarixi', models.
DateField(null=True)), ('user', models.OneToOneField(null=True,
on_delete=django.db.models.deletion.CASCADE, to=settings.
AUTH_USER_MODEL))])]
<|reserved_special_token_1|>
# Generated by Django 3.1.2 on 2020-10-29 06:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='userProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kassa', models.FloatField(null=True)),
('sirket', models.CharField(max_length=50, null=True)),
('telefon', models.CharField(max_length=50, null=True)),
('adress', models.CharField(max_length=200, null=True)),
('profilsekli', models.ImageField(null=True, upload_to='')),
('dogumtarixi', models.DateField(null=True)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
flexible
|
{
"blob_id": "87e17eb6fa91be09ac9afa43c4e58054faa77477",
"index": 5944,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='userProfile', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('kassa', models.FloatField(null=True)),\n ('sirket', models.CharField(max_length=50, null=True)), ('telefon',\n models.CharField(max_length=50, null=True)), ('adress', models.\n CharField(max_length=200, null=True)), ('profilsekli', models.\n ImageField(null=True, upload_to='')), ('dogumtarixi', models.\n DateField(null=True)), ('user', models.OneToOneField(null=True,\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='userProfile', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('kassa', models.FloatField(null=True)),\n ('sirket', models.CharField(max_length=50, null=True)), ('telefon',\n models.CharField(max_length=50, null=True)), ('adress', models.\n CharField(max_length=200, null=True)), ('profilsekli', models.\n ImageField(null=True, upload_to='')), ('dogumtarixi', models.\n DateField(null=True)), ('user', models.OneToOneField(null=True,\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 3.1.2 on 2020-10-29 06:04\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='userProfile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('kassa', models.FloatField(null=True)),\n ('sirket', models.CharField(max_length=50, null=True)),\n ('telefon', models.CharField(max_length=50, null=True)),\n ('adress', models.CharField(max_length=200, null=True)),\n ('profilsekli', models.ImageField(null=True, upload_to='')),\n ('dogumtarixi', models.DateField(null=True)),\n ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
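The migration above is an auto-generated initial migration; a hedged reconstruction of the model that would produce it looks roughly like this (the field definitions follow the CreateModel call, while the app and module layout are assumptions):

from django.conf import settings
from django.db import models

class userProfile(models.Model):
    user = models.OneToOneField(settings.AUTH_USER_MODEL, null=True,
                                on_delete=models.CASCADE)
    kassa = models.FloatField(null=True)
    sirket = models.CharField(max_length=50, null=True)
    telefon = models.CharField(max_length=50, null=True)
    adress = models.CharField(max_length=200, null=True)
    profilsekli = models.ImageField(null=True, upload_to='')
    dogumtarixi = models.DateField(null=True)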
"""
Faça um algoritmo que solicita ao usuário as notas de três provas. Calcule a média aritmética e
informe se o aluno foi Aprovado ou Reprovado (o aluno é considerado aprovado com a média igual ou superior a 6).
"""
nota1 = float(input("Digite sua primeira nota: "))
nota2 = float(input("Digite sua segunda nota: "))
nota3 = float(input("Digite sua terceira nota: "))
media = (nota1 + nota2 + nota3)/3
if media >= 6:
print("Parabéns!! Você foi aprovado.")
else:
print("Que pena!! Você foi reprovado.")
|
normal
|
{
"blob_id": "033d1b39dd3ebaa81c8c6c52386909acf076ef47",
"index": 2011,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif media >= 6:\n print('Parabéns!! Você foi aprovado.')\nelse:\n print('Que pena!! Você foi reprovado.')\n",
"step-3": "<mask token>\nnota1 = float(input('Digite sua primeira nota: '))\nnota2 = float(input('Digite sua segunda nota: '))\nnota3 = float(input('Digite sua terceira nota: '))\nmedia = (nota1 + nota2 + nota3) / 3\nif media >= 6:\n print('Parabéns!! Você foi aprovado.')\nelse:\n print('Que pena!! Você foi reprovado.')\n",
"step-4": "\"\"\"\r\nFaça um algoritmo que solicita ao usuário as notas de três provas. Calcule a média aritmética e\r\ninforme se o aluno foi Aprovado ou Reprovado (o aluno é considerado aprovado com a média igual ou superior a 6).\r\n\"\"\"\r\n\r\nnota1 = float(input(\"Digite sua primeira nota: \"))\r\nnota2 = float(input(\"Digite sua segunda nota: \"))\r\nnota3 = float(input(\"Digite sua terceira nota: \"))\r\n\r\nmedia = (nota1 + nota2 + nota3)/3\r\n\r\nif media >= 6:\r\n print(\"Parabéns!! Você foi aprovado.\")\r\nelse:\r\n print(\"Que pena!! Você foi reprovado.\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
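The float(input(...)) calls in the snippet above raise ValueError on non-numeric input; a small optional hardening (the helper name is an assumption) re-prompts instead of crashing:

def read_grade(prompt):
    while True:
        try:
            return float(input(prompt))
        except ValueError:
            print('Please enter a number.')

nota1 = read_grade('Enter your first grade: ')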
import numpy as np
import matplotlib.pyplot as plt
import csv
def save_cp_csvdata(reward, err, filename):
with open(filename, mode='w') as data_file:
data_writer = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data_writer.writerow(['epoch', 'reward', 'error'])
for i in range(reward.shape[0]):
data_writer.writerow([i, reward[i], err[i]])
def read_cp_csvdata(epoch, filename):
reward = np.zeros(epoch)
err = np.zeros(epoch)
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
pass
else:
# print(f'\t{row[0]} works in the {row[1]} department, and was born in {row[2]}.')
reward[line_count-1] = row[1]
err[line_count-1] = row[2]
line_count += 1
print(f'Processed {line_count} lines.')
return reward, err
def draw_plot(data, error, epoch=100, filename='tests.png'):
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_bar(x, y_map, filename='result.png'):
labels = list(y_map.keys())
plt.xlabel('episode')
plt.ylabel('reward')
plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])
for l in labels:
plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
def draw_multi_err(x, y_map, filename):
labels = list(y_map.keys())
fig, ax = plt.subplots()
plt.xlabel('episode')
plt.ylabel('reward')
for l in labels:
ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')
plt.legend(loc='lower right')
plt.savefig(filename, dpi=200)
plt.show()
def draw_plot1():
reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')
draw_plot(reward, err, filename='sarsa_grid.png')
reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')
draw_plot(reward, err, filename='sarsa_cartpole.png')
reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')
draw_plot(reward, err, filename='qlearning_grid.png')
reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')
draw_plot(reward, err, filename='qlearning_cartpole.png')
def draw_plot3():
grid_map = {}
cp_map = {}
grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]
cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]
grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]
cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]
grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]
cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]
draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')
draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')
def draw_plot4():
sarsagrid_map = {}
sarsacp_map = {}
qgrid_map = {}
qcp_map = {}
sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]
sarsagrid_map['softmax'] = read_cp_csvdata(100, 'softmax/sarsa_grid_f_1.csv')[0]
sarsacp_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]
sarsacp_map['softmax'] = read_cp_csvdata(100, 'softmax/sarsa_cartpole_f_1.csv')[0]
qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]
qgrid_map['softmax'] = read_cp_csvdata(100, 'softmax/qlearning_grid_f_1.csv')[0]
qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]
qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]
draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')
draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')
draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')
draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')
def draw_plot5():
pass
# draw_plot1()
|
normal
|
{
"blob_id": "a91d2f32afdc20516e56036c352cc267c728e886",
"index": 3051,
"step-1": "<mask token>\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n reward[line_count - 1] = row[1]\n err[line_count - 1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n plt.xlabel('episode')\n plt.ylabel('reward')\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\n<mask token>\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'\n )[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_grid_f_1.csv')[0]\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,\n 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_cartpole_f_1.csv')[0]\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'\n )[0]\n qgrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/qlearning_grid_f_1.csv')[0]\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'\n )[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n reward[line_count - 1] = row[1]\n err[line_count - 1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n plt.xlabel('episode')\n plt.ylabel('reward')\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_err(x, y_map, filename):\n labels = list(y_map.keys())\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n for l in labels:\n ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_plot1():\n reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')\n draw_plot(reward, err, filename='sarsa_grid.png')\n reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')\n draw_plot(reward, err, filename='sarsa_cartpole.png')\n reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')\n draw_plot(reward, err, filename='qlearning_grid.png')\n reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')\n draw_plot(reward, err, filename='qlearning_cartpole.png')\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'\n )[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_grid_f_1.csv')[0]\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,\n 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_cartpole_f_1.csv')[0]\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'\n )[0]\n qgrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/qlearning_grid_f_1.csv')[0]\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'\n )[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 
'softmax/q_cartpole_f.csv')[0]\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n reward[line_count - 1] = row[1]\n err[line_count - 1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n plt.xlabel('episode')\n plt.ylabel('reward')\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_err(x, y_map, filename):\n labels = list(y_map.keys())\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n for l in labels:\n ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_plot1():\n reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')\n draw_plot(reward, err, filename='sarsa_grid.png')\n reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')\n draw_plot(reward, err, filename='sarsa_cartpole.png')\n reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')\n draw_plot(reward, err, filename='qlearning_grid.png')\n reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')\n draw_plot(reward, err, filename='qlearning_cartpole.png')\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'\n )[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_grid_f_1.csv')[0]\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,\n 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_cartpole_f_1.csv')[0]\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'\n )[0]\n qgrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/qlearning_grid_f_1.csv')[0]\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'\n )[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 
'softmax/q_cartpole_f.csv')[0]\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\ndef draw_plot5():\n pass\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n reward[line_count - 1] = row[1]\n err[line_count - 1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n plt.xlabel('episode')\n plt.ylabel('reward')\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_err(x, y_map, filename):\n labels = list(y_map.keys())\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n for l in labels:\n ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_plot1():\n reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')\n draw_plot(reward, err, filename='sarsa_grid.png')\n reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')\n draw_plot(reward, err, filename='sarsa_cartpole.png')\n reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')\n draw_plot(reward, err, filename='qlearning_grid.png')\n reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')\n draw_plot(reward, err, filename='qlearning_cartpole.png')\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv'\n )[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_grid_f_1.csv')[0]\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100,\n 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100,\n 'softmax/sarsa_cartpole_f_1.csv')[0]\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv'\n )[0]\n qgrid_map['softmax'] = read_cp_csvdata(100,\n 'softmax/qlearning_grid_f_1.csv')[0]\n qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv'\n )[0]\n 
qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\ndef draw_plot5():\n pass\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\n\ndef save_cp_csvdata(reward, err, filename):\n with open(filename, mode='w') as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n data_writer.writerow(['epoch', 'reward', 'error'])\n for i in range(reward.shape[0]):\n data_writer.writerow([i, reward[i], err[i]])\n\n\ndef read_cp_csvdata(epoch, filename):\n reward = np.zeros(epoch)\n err = np.zeros(epoch)\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n pass\n else:\n # print(f'\\t{row[0]} works in the {row[1]} department, and was born in {row[2]}.')\n reward[line_count-1] = row[1]\n err[line_count-1] = row[2]\n line_count += 1\n print(f'Processed {line_count} lines.')\n return reward, err\n\n\ndef draw_plot(data, error, epoch=100, filename='tests.png'):\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n ax.errorbar(np.array(range(epoch)), data, yerr=error, fmt='o')\n plt.savefig(filename, dpi=200)\n\n plt.show()\n\n\ndef draw_multi_bar(x, y_map, filename='result.png'):\n labels = list(y_map.keys())\n\n plt.xlabel('episode')\n plt.ylabel('reward')\n\n plt.xticks([x.index(0), x.index(49), x.index(99)], [0, 49, 99])\n\n for l in labels:\n plt.plot(range(len(x)), y_map[l], linestyle='-', label=l)\n\n plt.legend(loc='lower right')\n\n plt.savefig(filename, dpi=200)\n plt.show()\n\n\ndef draw_multi_err(x, y_map, filename):\n labels = list(y_map.keys())\n\n fig, ax = plt.subplots()\n plt.xlabel('episode')\n plt.ylabel('reward')\n for l in labels:\n ax.errorbar(np.array(range(x)), y_map[l][0], yerr=y_map[l][1], fmt='o')\n plt.legend(loc='lower right')\n plt.savefig(filename, dpi=200)\n\n plt.show()\n\n\ndef draw_plot1():\n reward, err = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')\n draw_plot(reward, err, filename='sarsa_grid.png')\n\n reward, err = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')\n draw_plot(reward, err, filename='sarsa_cartpole.png')\n\n reward, err = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')\n draw_plot(reward, err, filename='qlearning_grid.png')\n\n reward, err = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')\n draw_plot(reward, err, filename='qlearning_cartpole.png')\n\n\ndef draw_plot3():\n grid_map = {}\n cp_map = {}\n\n grid_map['sarsa'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n cp_map['sarsa'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n grid_map['qlearning'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n cp_map['qlearning'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n grid_map['cem'] = read_cp_csvdata(100, 'ce_grid.csv')[0]\n cp_map['cem'] = read_cp_csvdata(100, 'ce_cartpole.csv')[0]\n draw_multi_bar(range(100), grid_map, filename='grid_comparision.png')\n draw_multi_bar(range(100), cp_map, filename='cartpole_comparision.png')\n\n\ndef draw_plot4():\n sarsagrid_map = {}\n sarsacp_map = {}\n qgrid_map = {}\n qcp_map = {}\n\n sarsagrid_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_grid_f_1.csv')[0]\n sarsagrid_map['softmax'] = read_cp_csvdata(100, 'softmax/sarsa_grid_f_1.csv')[0]\n\n sarsacp_map['epsilon greedy'] = read_cp_csvdata(100, 'sarsa_cartpole_f_1.csv')[0]\n sarsacp_map['softmax'] = read_cp_csvdata(100, 'softmax/sarsa_cartpole_f_1.csv')[0]\n\n qgrid_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_grid_f_1.csv')[0]\n qgrid_map['softmax'] = read_cp_csvdata(100, 'softmax/qlearning_grid_f_1.csv')[0]\n\n 
qcp_map['epsilon greedy'] = read_cp_csvdata(100, 'qlearning_cartpole_f.csv')[0]\n qcp_map['softmax'] = read_cp_csvdata(100, 'softmax/q_cartpole_f.csv')[0]\n\n draw_multi_bar(range(100), sarsagrid_map, filename='sarsa_grid_se.png')\n draw_multi_bar(range(100), sarsacp_map, filename='sarsa_cp_se.png')\n draw_multi_bar(range(100), qgrid_map, filename='q_grid_se.png')\n draw_multi_bar(range(100), qcp_map, filename='q_cp_se.png')\n\n\ndef draw_plot5():\n pass\n\n# draw_plot1()\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'
argumentName = row[4]
argumentType = row[5][1:]
try:
events[eventName]
except Exception:
events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
eventIndices[eventType] = f
f.write('## Static Events\n')
f.write('Events can be subscribed by using `' + eventType +
'.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
indexEntry = '| [[' + eventType + '.' + eventName + ']] | '
if len(arguments) > 0:
indexEntry += '<br/>'.join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
fullName = eventType + '.' + eventName
filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +
eventName + '.md')
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
f.write('# ' + fullName + '\n')
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = ', '.join(arguments)
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
f.write('Regular event: you can subscribe to it through `' + fullName +
""".Add(<function handler>)`
""")
f.write('\n')
f.write('### Parameters\n')
argumentsList = '\n- '.join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
events = {}
eventTypes = set()
eventIndices = {}
i = 0
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'
argumentName = row[4]
argumentType = row[5][1:]
try:
events[eventName]
except Exception:
events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
eventIndices[eventType] = f
f.write('## Static Events\n')
f.write('Events can be subscribed by using `' + eventType +
'.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
indexEntry = '| [[' + eventType + '.' + eventName + ']] | '
if len(arguments) > 0:
indexEntry += '<br/>'.join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
fullName = eventType + '.' + eventName
filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +
eventName + '.md')
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
f.write('# ' + fullName + '\n')
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = ', '.join(arguments)
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
f.write('Regular event: you can subscribe to it through `' + fullName +
""".Add(<function handler>)`
""")
f.write('\n')
f.write('### Parameters\n')
argumentsList = '\n- '.join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
<|reserved_special_token_1|>
import csv
import os
events = {}
eventTypes = set()
eventIndices = {}
i = 0
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'
argumentName = row[4]
argumentType = row[5][1:]
try:
events[eventName]
except Exception:
events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
eventIndices[eventType] = f
f.write('## Static Events\n')
f.write('Events can be subscribed by using `' + eventType +
'.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
indexEntry = '| [[' + eventType + '.' + eventName + ']] | '
if len(arguments) > 0:
indexEntry += '<br/>'.join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
fullName = eventType + '.' + eventName
filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +
eventName + '.md')
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, 'w')
f.write('# ' + fullName + '\n')
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = ', '.join(arguments)
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
f.write('Regular event: you can subscribe to it through `' + fullName +
""".Add(<function handler>)`
""")
f.write('\n')
f.write('### Parameters\n')
argumentsList = '\n- '.join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
<|reserved_special_token_1|>
import csv
import os
events = {}
eventTypes = set()
eventIndices = {}
i = 0
with open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
if i < 4:
i += 1
continue
eventName = row[3]
eventType = "GameEvents" if len(row[10]) > 0 else "Events"
argumentName = row[4]
argumentType = row[5][1:]
try:
events[eventName]
except Exception:
events[eventName] = {'eventType': eventType, 'arguments': []}
eventTypes.add(eventType)
if argumentName:
argumentText = '`' + argumentName
if argumentType:
argumentText += ' [' + argumentType + ']'
argumentText += '`'
# argument = {'argumentName': argumentName, 'argumentType': argumentType, 'argumentText': argumentText}
events[eventName]['arguments'].append(argumentText)
for eventType in eventTypes:
filename = '../EventObjects/' + eventType + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, "w")
eventIndices[eventType] = f
f.write('## Static Events\n')
f.write('Events can be subscribed by using `' + eventType + '.SomeEvent.Add(SomeFunction)`.\n')
f.write('\n')
f.write('| Name | Parameters |\n')
f.write('|:---- |:--------- |\n')
for eventName in events:
event = events[eventName]
eventType = event['eventType']
eventIndex = eventIndices[eventType]
arguments = event['arguments']
# -----------------------
# Create Index Entry
# -----------------------
indexEntry = '| [[' + eventType + "." + eventName + ']] | '
if len(arguments) > 0:
indexEntry += "<br/>".join(arguments)
indexEntry += ' |\n'
eventIndex.write(indexEntry)
# -----------------------
# Create Event File
# -----------------------
fullName = eventType + '.' + eventName
filename = '../EventObjects/' + eventType + '/' + eventType + "." + eventName + '.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, "w")
f.write('# ' + fullName + "\n")
f.write('## Description\n')
f.write('TBD\n')
f.write('\n')
f.write('## Usage\n')
argumentsText = (", ".join(arguments))
argumentsText = argumentsText.replace('`', '')
f.write('> `' + fullName + '(' + argumentsText + ')`\n\n')
f.write('Regular event: you can subscribe to it through `' + fullName + '.Add(<function handler>)`\n')
f.write('\n')
f.write('### Parameters\n')
argumentsList = "\n- ".join(arguments)
if len(argumentsList) > 0:
argumentsList = '- ' + argumentsList
f.write(argumentsList)
|
flexible
|
{
"blob_id": "5ce98ae241c0982eeb1027ffcff5b770f94ff1a3",
"index": 77,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n",
"step-3": "<mask token>\nevents = {}\neventTypes = set()\neventIndices = {}\ni = 0\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n",
"step-4": "import csv\nimport os\nevents = {}\neventTypes = set()\neventIndices = {}\ni = 0\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n if i < 4:\n i += 1\n continue\n eventName = row[3]\n eventType = 'GameEvents' if len(row[10]) > 0 else 'Events'\n argumentName = row[4]\n argumentType = row[5][1:]\n try:\n events[eventName]\n except Exception:\n events[eventName] = {'eventType': eventType, 'arguments': []}\n eventTypes.add(eventType)\n if argumentName:\n argumentText = '`' + argumentName\n if argumentType:\n argumentText += ' [' + argumentType + ']'\n argumentText += '`'\n events[eventName]['arguments'].append(argumentText)\nfor eventType in eventTypes:\n filename = '../EventObjects/' + eventType + '.md'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n eventIndices[eventType] = f\n f.write('## Static Events\\n')\n f.write('Events can be subscribed by using `' + eventType +\n '.SomeEvent.Add(SomeFunction)`.\\n')\n f.write('\\n')\n f.write('| Name | Parameters |\\n')\n f.write('|:---- |:--------- |\\n')\nfor eventName in events:\n event = events[eventName]\n eventType = event['eventType']\n eventIndex = eventIndices[eventType]\n arguments = event['arguments']\n indexEntry = '| [[' + eventType + '.' + eventName + ']] | '\n if len(arguments) > 0:\n indexEntry += '<br/>'.join(arguments)\n indexEntry += ' |\\n'\n eventIndex.write(indexEntry)\n fullName = eventType + '.' + eventName\n filename = ('../EventObjects/' + eventType + '/' + eventType + '.' +\n eventName + '.md')\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, 'w')\n f.write('# ' + fullName + '\\n')\n f.write('## Description\\n')\n f.write('TBD\\n')\n f.write('\\n')\n f.write('## Usage\\n')\n argumentsText = ', '.join(arguments)\n argumentsText = argumentsText.replace('`', '')\n f.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n f.write('Regular event: you can subscribe to it through `' + fullName +\n \"\"\".Add(<function handler>)`\n\"\"\")\n f.write('\\n')\n f.write('### Parameters\\n')\n argumentsList = '\\n- '.join(arguments)\n if len(argumentsList) > 0:\n argumentsList = '- ' + argumentsList\n f.write(argumentsList)\n",
"step-5": "import csv\nimport os\n\nevents = {}\neventTypes = set()\neventIndices = {}\n\ni = 0\n\nwith open('Civ VI Modding Companion - Events.csv', newline='') as csvfile:\n\treader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\tfor row in reader:\n\n\t\tif i < 4:\n\t\t\ti += 1\n\t\t\tcontinue\n\n\t\teventName = row[3]\n\t\teventType = \"GameEvents\" if len(row[10]) > 0 else \"Events\"\n\n\t\targumentName = row[4]\n\t\targumentType = row[5][1:]\n\n\t\ttry:\n\t\t\tevents[eventName]\n\t\texcept Exception:\n\t\t\tevents[eventName] = {'eventType': eventType, 'arguments': []}\n\n\t\teventTypes.add(eventType)\n\n\t\tif argumentName:\n\t\t\targumentText = '`' + argumentName\n\t\t\tif argumentType:\n\t\t\t\targumentText += ' [' + argumentType + ']'\n\t\t\targumentText += '`'\n\n\t\t\t# argument = {'argumentName': argumentName, 'argumentType': argumentType, 'argumentText': argumentText}\n\t\t\tevents[eventName]['arguments'].append(argumentText)\n\nfor eventType in eventTypes:\n\n\tfilename = '../EventObjects/' + eventType + '.md'\n\n\tos.makedirs(os.path.dirname(filename), exist_ok=True)\n\tf = open(filename, \"w\")\n\teventIndices[eventType] = f\n\n\tf.write('## Static Events\\n')\n\tf.write('Events can be subscribed by using `' + eventType + '.SomeEvent.Add(SomeFunction)`.\\n')\n\tf.write('\\n')\n\tf.write('| Name | Parameters |\\n')\n\tf.write('|:---- |:--------- |\\n')\n\nfor eventName in events:\n\n\tevent\t\t= events[eventName]\n\teventType\t= event['eventType']\n\teventIndex\t= eventIndices[eventType]\n\n\targuments\t= event['arguments']\n\n\t# -----------------------\n\t# Create Index Entry\n\t# -----------------------\n\tindexEntry = '| [[' + eventType + \".\" + eventName + ']] | '\n\n\tif len(arguments) > 0:\n\t\tindexEntry += \"<br/>\".join(arguments)\n\n\tindexEntry += ' |\\n'\n\teventIndex.write(indexEntry)\n\t# -----------------------\n\t# Create Event File\n\t# -----------------------\n\tfullName = eventType + '.' + eventName\n\n\tfilename = '../EventObjects/' + eventType + '/' + eventType + \".\" + eventName + '.md'\n\tos.makedirs(os.path.dirname(filename), exist_ok=True)\n\tf = open(filename, \"w\")\n\n\tf.write('# ' + fullName + \"\\n\")\n\tf.write('## Description\\n')\n\tf.write('TBD\\n')\n\tf.write('\\n')\n\tf.write('## Usage\\n')\n\n\targumentsText = (\", \".join(arguments))\n\targumentsText = argumentsText.replace('`', '')\n\n\tf.write('> `' + fullName + '(' + argumentsText + ')`\\n\\n')\n\tf.write('Regular event: you can subscribe to it through `' + fullName + '.Add(<function handler>)`\\n')\n\tf.write('\\n')\n\tf.write('### Parameters\\n')\n\n\targumentsList = \"\\n- \".join(arguments)\n\tif len(argumentsList) > 0:\n\t\targumentsList = '- ' + argumentsList\n\n\tf.write(argumentsList)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
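The generator above opens one index file per event type plus one file per event and never closes them, relying on interpreter exit to flush. A hedged variant of the index-file handling using context managers (the names mirror the snippet, but the ExitStack layout is an assumption):

from contextlib import ExitStack
import os

def open_index(event_type, stack):
    filename = '../EventObjects/' + event_type + '.md'
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    return stack.enter_context(open(filename, 'w'))

with ExitStack() as stack:
    eventIndices = {t: open_index(t, stack) for t in ('Events', 'GameEvents')}
    # ... write the index headers and rows as in the snippet;
    # every handle is closed automatically when the with-block exits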
"""
File: ex17_map_reduce.py
Author: TonyDeep
Date: 2020-07-21
"""
from functools import reduce
print('#1 map')
a_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]
map_data = map(lambda x: x * 2 + 1, a_list)
new_list = list(map_data)
print(new_list)
print('\n#2 reduce')
b_list = [1, 2, 3, 4, 5]
reduce_data = reduce(lambda x, y: x + y, b_list)
print(reduce_data)
|
normal
|
{
"blob_id": "8e3b26826752b6b3482e8a29b9b58f5025c7ef58",
"index": 4758,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('#1 map')\n<mask token>\nprint(new_list)\nprint('\\n#2 reduce')\n<mask token>\nprint(reduce_data)\n",
"step-3": "<mask token>\nprint('#1 map')\na_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]\nmap_data = map(lambda x: x * 2 + 1, a_list)\nnew_list = list(map_data)\nprint(new_list)\nprint('\\n#2 reduce')\nb_list = [1, 2, 3, 4, 5]\nreduce_data = reduce(lambda x, y: x + y, b_list)\nprint(reduce_data)\n",
"step-4": "<mask token>\nfrom functools import reduce\nprint('#1 map')\na_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]\nmap_data = map(lambda x: x * 2 + 1, a_list)\nnew_list = list(map_data)\nprint(new_list)\nprint('\\n#2 reduce')\nb_list = [1, 2, 3, 4, 5]\nreduce_data = reduce(lambda x, y: x + y, b_list)\nprint(reduce_data)\n",
"step-5": "\"\"\"\nFile: ex17_map_reduce.py\nAuthor: TonyDeep\nDate: 2020-07-21\n\"\"\"\n\nfrom functools import reduce\n\nprint('#1 map')\na_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]\nmap_data = map(lambda x: x * 2 + 1, a_list)\nnew_list = list(map_data)\nprint(new_list)\n\nprint('\\n#2 reduce')\nb_list = [1, 2, 3, 4, 5]\nreduce_data = reduce(lambda x, y: x + y, b_list)\nprint(reduce_data)\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
#coding:utf-8
import os
def listDir(path):
allFile = []
    subFile = os.listdir(path) #list the directories and files under the current path; returns a list
    for fileName in subFile:
        fullFile = os.path.join(path, fileName) #os joins the path and the file name into a full path, same as: string + '/' + string
        if os.path.isdir(fullFile): #check whether it is a directory or a file; there is also an isfile() method
            listDir(fullFile) #recurse
        allFile.append(fullFile.decode('gbk').encode('utf-8')) #re-encode Chinese file names
        print fullFile.decode('gbk').encode('utf-8')
    return allFile
#get all files in a directory recursively
#test of the recursive method
#listDir("C:/Users/13160/Desktop")
#the standard library provides os.walk(path) for traversing a directory; it yields 3-tuples (dirpath, dirnames, filenames)
for path, dir, file in os.walk("C:/Users/13160/Desktop"):
for f in file:
print os.path.join(path, f).decode('gbk').encode('utf-8')
for d in dir:
print os.path.join(path, d).decode('gbk').encode('utf-8')
|
normal
|
{
"blob_id": "a4f446d6fd2a34c0ef591d7cbda59dccc0a36611",
"index": 2069,
"step-1": "#!/usr/bin/env python\n#coding:utf-8\n\nimport os\n\ndef listDir(path):\n allFile = []\n subFile = os.listdir(path) #列出当前路径下的目录或者文件,返回列表\n for fileName in subFile:\n fullFile = os.path.join(path, fileName) #os提供方法连接路径与文件名形成完整路径名,作用同:字符串+“/”+字符串\n if os.path.isdir(fullFile): #判断是否为目录或者文件,有isfile()方法\n listDir(fullFile) #递归\n allFile.append(fullFile.decode('gbk').encode('utf-8')) #对于中文的编码\n print fullFile.decode('gbk').encode('utf-8')\n return allFile\n#递归方式获取文件目录\n#递归方法的测试\n#listDir(\"C:/Users/13160/Desktop\")\n\n#系统提供遍历目录的方法os.walk(path),返回3元元组(遍历路径名,目录列表,文件列表)\nfor path, dir, file in os.walk(\"C:/Users/13160/Desktop\"):\n for f in file:\n print os.path.join(path, f).decode('gbk').encode('utf-8')\n for d in dir:\n print os.path.join(path, d).decode('gbk').encode('utf-8')",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def patientSelect(CONN, staff):
c = CONN.cursor()
print('Search for Patient')
select = input("Enter patient name(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
return
c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +
select + '%',))
rows = c.fetchall()
if len(rows) == 0:
print('No patient found, please try again')
return patientSelect(CONN, staff)
count = 1
for x in rows:
print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]
)
count = count + 1
try:
select = input('Please select your patient: ')
selectedPatient = int(select) - 1
patientHCNO = rows[selectedPatient][0]
patientName = rows[selectedPatient][1]
patient = patientHCNO, patientName
except:
print('Invalid input, please try again')
return patientSelect(CONN, staff)
return patientChart(CONN, staff, patient)
<|reserved_special_token_0|>
def doctorChartMenu(CONN, patient, chart_id, staff):
print('==========Chart Menu==========')
print('1. Add a symptoms')
print('2. Add a Diagnosis')
print('3. Add a medication')
print('4. Exit')
select = input('Please select an option to continue: ')
if select == '1':
print('Add symptoms')
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print('Add Diagnosis')
os.system('clear')
return addDiagnosis(CONN, patient, chart_id, staff)
elif select == '3':
print('Add medication')
os.system('clear')
return addMedication(CONN, patient, chart_id, staff)
elif select == '4':
return patientChart(CONN, staff, patient)
else:
print('Invalid entry, please try again')
return patientSelect(CONN, staff)
def nurseChartMenu(CONN, patient, chart_id, staff):
print('Chart Menu')
print('1. Add a symptoms')
print('2. close chart')
print('3. Exit')
    select = input('Please select an option to continue: ')
if select == '1':
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print('xx')
return closeChart(CONN, patient, chart_id, staff)
elif select == '3':
return patientChart(CONN, staff, patient)
else:
print('Invalid, please try again')
return patientSelect(CONN, staff)
def addSymptoms(CONN, patient, chart_id, staff):
c = CONN.cursor()
symptoms = input('Please enter a symptom: ')
while len(symptoms) == 0:
symptoms = input('Please enter a symptom: ')
c.execute(
"""INSERT INTO symptoms VALUES
(?,?,?,DateTime('now','localtime'),?);"""
, (patient[0], chart_id, staff[0], symptoms))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
<|reserved_special_token_0|>
def closeChart(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))
rows = c.fetchone()
if rows[3] is None:
print('Close chart id ' + str(chart_id) + '?')
print('1. Yes.')
print('2. No.')
result = input('Please enter your choice: ')
if result == '1':
print('Closing chart.')
c.execute(
"""UPDATE charts SET edate = DateTime('now','localtime')
WHERE chart_id = ?;"""
, (chart_id,))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
elif result == '2':
return viewChart(CONN, chart_id, staff, patient, 1)
else:
print('Invalid')
return closeChart(CONN, patient, chart_id, staff)
def addPatient(CONN, staff):
c = CONN.cursor()
print('==========New Patient Record==========')
name = input('Please enter patient name: ')
hcno = input('Please enter patient HCNO: ')
try:
testHcno = int(hcno)
except:
print('Invalid HCNO, please try again')
return addPatient(CONN, staff)
age_group = input('Please enter age group: ')
address = input('Please enter address: ')
phone = input('Please enter phone number: ')
emg_phone = input('Please enter emergency phone number: ')
try:
c.execute("""INSERT INTO patients VALUES
(?,?,?,?,?,?);""",
(hcno, name, age_group, address, phone, emg_phone))
CONN.commit()
print('Patient record created.')
except:
print('Invalid entry, patient already exists')
def addChart(CONN, staff, patient):
c = CONN.cursor()
c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')
last_chart = c.fetchone()
if last_chart[0] is not None:
new_chart_id = int(last_chart[0]) + 1
else:
new_chart_id = '00001'
c.execute(
"""INSERT INTO charts VALUES
(?,?, DateTime('now','localtime'), ?);"""
, (new_chart_id, patient[0], None))
c.execute(
'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',
(patient[0],))
CONN.commit()
    print('A new chart has been created. Chart ID: ' + str(new_chart_id))
return patientChart(CONN, staff, patient)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def patientSelect(CONN, staff):
c = CONN.cursor()
print('Search for Patient')
select = input("Enter patient name(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
return
c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +
select + '%',))
rows = c.fetchall()
if len(rows) == 0:
print('No patient found, please try again')
return patientSelect(CONN, staff)
count = 1
for x in rows:
print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]
)
count = count + 1
try:
select = input('Please select your patient: ')
selectedPatient = int(select) - 1
patientHCNO = rows[selectedPatient][0]
patientName = rows[selectedPatient][1]
patient = patientHCNO, patientName
except:
print('Invalid input, please try again')
return patientSelect(CONN, staff)
return patientChart(CONN, staff, patient)
<|reserved_special_token_0|>
def doctorChartMenu(CONN, patient, chart_id, staff):
print('==========Chart Menu==========')
print('1. Add a symptoms')
print('2. Add a Diagnosis')
print('3. Add a medication')
print('4. Exit')
select = input('Please select an option to continue: ')
if select == '1':
print('Add symptoms')
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print('Add Diagnosis')
os.system('clear')
return addDiagnosis(CONN, patient, chart_id, staff)
elif select == '3':
print('Add medication')
os.system('clear')
return addMedication(CONN, patient, chart_id, staff)
elif select == '4':
return patientChart(CONN, staff, patient)
else:
print('Invalid entry, please try again')
return patientSelect(CONN, staff)
def nurseChartMenu(CONN, patient, chart_id, staff):
print('Chart Menu')
print('1. Add a symptoms')
print('2. close chart')
print('3. Exit')
    select = input('Please select an option to continue: ')
if select == '1':
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print('xx')
return closeChart(CONN, patient, chart_id, staff)
elif select == '3':
return patientChart(CONN, staff, patient)
else:
print('Invalid, please try again')
return patientSelect(CONN, staff)
def addSymptoms(CONN, patient, chart_id, staff):
c = CONN.cursor()
symptoms = input('Please enter a symptom: ')
while len(symptoms) == 0:
symptoms = input('Please enter a symptom: ')
c.execute(
"""INSERT INTO symptoms VALUES
(?,?,?,DateTime('now','localtime'),?);"""
, (patient[0], chart_id, staff[0], symptoms))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def addDiagnosis(CONN, patient, chart_id, staff):
c = CONN.cursor()
diagnosis = input('Please enter a diagnosis: ')
while len(diagnosis) == 0:
diagnosis = input('Please enter a diagnosis: ')
c.execute(
"""INSERT INTO diagnoses VALUES
(?,?,?,DateTime('now', 'localtime'),?);"""
, (patient[0], chart_id, staff[0], diagnosis))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def addMedication(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))
rows = c.fetchone()
patientAge = rows[2]
medicationName = input('Please enter a medication: ')
c.execute(
'SELECT sug_amount FROM dosage WHERE drug_name = ? AND age_group = ?;',
(medicationName, patientAge))
dosageAmount = c.fetchone()
    if dosageAmount is None:
        print('Drug name does not exist')
input('Press any key to return')
return viewChart(CONN, chart_id, staff, patient, 1)
c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (
patient[0],))
allergies = c.fetchone()
for x in allergies:
if x == medicationName:
            print('WARNING, the patient is allergic to ' + x)
c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (
medicationName,))
inferallergies = c.fetchall()
for x in inferallergies:
        print('Patient can be allergic to: ' + x[0])
amount = int(input('Medication amount: '))
if amount > dosageAmount[0]:
        print('Suggested amount: ' + str(dosageAmount[0]))
        confirm = input(
            'WARNING: Prescribed amount is greater than suggested amount. Confirm (y/n)'
)
if confirm == 'n':
return viewChart(CONN, chart_id, staff, patient, 1)
day = input('Medication length(in days): ')
c.execute(
"""INSERT INTO medications VALUES
(?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);"""
, (patient[0], chart_id, staff[0], '+' + day + ' day', amount,
medicationName))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def closeChart(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))
rows = c.fetchone()
if rows[3] is None:
print('Close chart id ' + str(chart_id) + '?')
print('1. Yes.')
print('2. No.')
result = input('Please enter your choice: ')
if result == '1':
print('Closing chart.')
c.execute(
"""UPDATE charts SET edate = DateTime('now','localtime')
WHERE chart_id = ?;"""
, (chart_id,))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
elif result == '2':
return viewChart(CONN, chart_id, staff, patient, 1)
else:
print('Invalid')
return closeChart(CONN, patient, chart_id, staff)
def addPatient(CONN, staff):
c = CONN.cursor()
print('==========New Patient Record==========')
name = input('Please enter patient name: ')
hcno = input('Please enter patient HCNO: ')
try:
testHcno = int(hcno)
except:
print('Invalid HCNO, please try again')
return addPatient(CONN, staff)
age_group = input('Please enter age group: ')
address = input('Please enter address: ')
phone = input('Please enter phone number: ')
emg_phone = input('Please enter emergency phone number: ')
try:
c.execute("""INSERT INTO patients VALUES
(?,?,?,?,?,?);""",
(hcno, name, age_group, address, phone, emg_phone))
CONN.commit()
print('Patient record created.')
except:
print('Invalid entry, patient already exists')
def addChart(CONN, staff, patient):
c = CONN.cursor()
c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')
last_chart = c.fetchone()
if last_chart[0] is not None:
new_chart_id = int(last_chart[0]) + 1
else:
new_chart_id = '00001'
c.execute(
"""INSERT INTO charts VALUES
(?,?, DateTime('now','localtime'), ?);"""
, (new_chart_id, patient[0], None))
c.execute(
'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',
(patient[0],))
CONN.commit()
    print('A new chart has been created. Chart ID: ' + str(new_chart_id))
return patientChart(CONN, staff, patient)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def patientSelect(CONN, staff):
c = CONN.cursor()
print('Search for Patient')
select = input("Enter patient name(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
return
c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +
select + '%',))
rows = c.fetchall()
if len(rows) == 0:
print('No patient found, please try again')
return patientSelect(CONN, staff)
count = 1
for x in rows:
print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]
)
count = count + 1
try:
select = input('Please select your patient: ')
selectedPatient = int(select) - 1
patientHCNO = rows[selectedPatient][0]
patientName = rows[selectedPatient][1]
patient = patientHCNO, patientName
except:
print('Invalid input, please try again')
return patientSelect(CONN, staff)
return patientChart(CONN, staff, patient)
def patientChart(CONN, staff, patient):
c = CONN.cursor()
os.system('clear')
print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])
c.execute(
"""SELECT *
FROM charts
WHERE hcno = ?
ORDER BY adate
"""
, (patient[0],))
rows = c.fetchall()
count = 1
checkOpenChart = 0
for x in rows:
print(str(count) + ': chart id: ' + x[0] + '; patient hcno: ' + x[1
] + '; admission time: ' + x[2], end='')
if x[3] is None:
print(' discharge time: ' + 'Status: open.')
checkOpenChart = checkOpenChart + 1
else:
print(' discharge time: ' + x[3] + 'Status: close.')
count = count + 1
if checkOpenChart == 0:
print('No open chart')
openChart = input('Do you want to create a new chart (y/n):')
if openChart == 'y':
print('Open chart')
return addChart(CONN, staff, patient)
else:
print('')
print(
'You have an open chart. If you want a new chart, close the open chart first'
)
try:
select = input(
"Please select a chart to continue(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
return patientSelect(CONN, staff)
selectChart = int(select) - 1
chart_id = rows[selectChart][0]
except:
        print('Invalid entry')
return patientChart(CONN, staff, patient)
if rows[selectChart][3] is None:
editAble = 1
else:
editAble = 0
return viewChart(CONN, chart_id, staff, patient, editAble)
def viewChart(CONN, chart_id, staff, patient, editAble):
c = CONN.cursor()
os.system('clear')
print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])
print('symptoms table')
c.execute(
"""SELECT *
FROM symptoms
WHERE hcno = ? AND chart_id = ?
ORDER BY obs_date;"""
, (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
print('diagnosis table')
c.execute(
"""SELECT *
FROM diagnoses
WHERE hcno = ? AND chart_id = ?
ORDER BY ddate;"""
, (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
print('medication table')
c.execute(
"""SELECT *
FROM medications
WHERE hcno = ? AND chart_id = ?
ORDER BY mdate;"""
, (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
if editAble == 0:
input('Press any key to return: ')
return patientChart(CONN, staff, patient)
if staff[1] == 'D':
return doctorChartMenu(CONN, patient, chart_id, staff)
elif staff[1] == 'N':
return nurseChartMenu(CONN, patient, chart_id, staff)
def doctorChartMenu(CONN, patient, chart_id, staff):
print('==========Chart Menu==========')
print('1. Add a symptoms')
print('2. Add a Diagnosis')
print('3. Add a medication')
print('4. Exit')
select = input('Please select an option to continue: ')
if select == '1':
print('Add symptoms')
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print('Add Diagnosis')
os.system('clear')
return addDiagnosis(CONN, patient, chart_id, staff)
elif select == '3':
print('Add medication')
os.system('clear')
return addMedication(CONN, patient, chart_id, staff)
elif select == '4':
return patientChart(CONN, staff, patient)
else:
print('Invalid entry, please try again')
return patientSelect(CONN, staff)
def nurseChartMenu(CONN, patient, chart_id, staff):
print('Chart Menu')
print('1. Add a symptoms')
print('2. close chart')
print('3. Exit')
    select = input('Please select an option to continue: ')
if select == '1':
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print('xx')
return closeChart(CONN, patient, chart_id, staff)
elif select == '3':
return patientChart(CONN, staff, patient)
else:
print('Invalid, please try again')
return patientSelect(CONN, staff)
def addSymptoms(CONN, patient, chart_id, staff):
c = CONN.cursor()
symptoms = input('Please enter a symptom: ')
while len(symptoms) == 0:
symptoms = input('Please enter a symptom: ')
c.execute(
"""INSERT INTO symptoms VALUES
(?,?,?,DateTime('now','localtime'),?);"""
, (patient[0], chart_id, staff[0], symptoms))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def addDiagnosis(CONN, patient, chart_id, staff):
c = CONN.cursor()
diagnosis = input('Please enter a diagnosis: ')
while len(diagnosis) == 0:
diagnosis = input('Please enter a diagnosis: ')
c.execute(
"""INSERT INTO diagnoses VALUES
(?,?,?,DateTime('now', 'localtime'),?);"""
, (patient[0], chart_id, staff[0], diagnosis))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def addMedication(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))
rows = c.fetchone()
patientAge = rows[2]
medicationName = input('Please enter a medication: ')
c.execute(
'SELECT sug_amount FROM dosage WHERE drug_name = ? AND age_group = ?;',
(medicationName, patientAge))
dosageAmount = c.fetchone()
    if dosageAmount is None:
        print('Drug name does not exist')
input('Press any key to return')
return viewChart(CONN, chart_id, staff, patient, 1)
c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (
patient[0],))
allergies = c.fetchone()
for x in allergies:
if x == medicationName:
            print('WARNING, the patient is allergic to ' + x)
c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (
medicationName,))
inferallergies = c.fetchall()
for x in inferallergies:
        print('Patient can be allergic to: ' + x[0])
amount = int(input('Medication amount: '))
if amount > dosageAmount[0]:
        print('Suggested amount: ' + str(dosageAmount[0]))
        confirm = input(
            'WARNING: Prescribed amount is greater than suggested amount. Confirm (y/n)'
)
if confirm == 'n':
return viewChart(CONN, chart_id, staff, patient, 1)
day = input('Medication length(in days): ')
c.execute(
"""INSERT INTO medications VALUES
(?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);"""
, (patient[0], chart_id, staff[0], '+' + day + ' day', amount,
medicationName))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def closeChart(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))
rows = c.fetchone()
if rows[3] is None:
print('Close chart id ' + str(chart_id) + '?')
print('1. Yes.')
print('2. No.')
result = input('Please enter your choice: ')
if result == '1':
print('Closing chart.')
c.execute(
"""UPDATE charts SET edate = DateTime('now','localtime')
WHERE chart_id = ?;"""
, (chart_id,))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
elif result == '2':
return viewChart(CONN, chart_id, staff, patient, 1)
else:
print('Invalid')
return closeChart(CONN, patient, chart_id, staff)
def addPatient(CONN, staff):
c = CONN.cursor()
print('==========New Patient Record==========')
name = input('Please enter patient name: ')
hcno = input('Please enter patient HCNO: ')
try:
testHcno = int(hcno)
except:
print('Invalid HCNO, please try again')
return addPatient(CONN, staff)
age_group = input('Please enter age group: ')
address = input('Please enter address: ')
phone = input('Please enter phone number: ')
emg_phone = input('Please enter emergency phone number: ')
try:
c.execute("""INSERT INTO patients VALUES
(?,?,?,?,?,?);""",
(hcno, name, age_group, address, phone, emg_phone))
CONN.commit()
print('Patient record created.')
except:
print('Invalid entry, patient already exists')
def addChart(CONN, staff, patient):
c = CONN.cursor()
c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')
last_chart = c.fetchone()
if last_chart[0] is not None:
new_chart_id = int(last_chart[0]) + 1
else:
new_chart_id = '00001'
c.execute(
"""INSERT INTO charts VALUES
(?,?, DateTime('now','localtime'), ?);"""
, (new_chart_id, patient[0], None))
c.execute(
'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',
(patient[0],))
CONN.commit()
    print('A new chart has been created. Chart ID: ' + str(new_chart_id))
return patientChart(CONN, staff, patient)
<|reserved_special_token_1|>
import sqlite3
import os
def patientSelect(CONN, staff):
c = CONN.cursor()
print('Search for Patient')
select = input("Enter patient name(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
return
c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +
select + '%',))
rows = c.fetchall()
if len(rows) == 0:
print('No patient found, please try again')
return patientSelect(CONN, staff)
count = 1
for x in rows:
print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]
)
count = count + 1
try:
select = input('Please select your patient: ')
selectedPatient = int(select) - 1
patientHCNO = rows[selectedPatient][0]
patientName = rows[selectedPatient][1]
patient = patientHCNO, patientName
except:
print('Invalid input, please try again')
return patientSelect(CONN, staff)
return patientChart(CONN, staff, patient)
def patientChart(CONN, staff, patient):
c = CONN.cursor()
os.system('clear')
print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])
c.execute(
"""SELECT *
FROM charts
WHERE hcno = ?
ORDER BY adate
"""
, (patient[0],))
rows = c.fetchall()
count = 1
checkOpenChart = 0
for x in rows:
print(str(count) + ': chart id: ' + x[0] + '; patient hcno: ' + x[1
] + '; admission time: ' + x[2], end='')
if x[3] is None:
print(' discharge time: ' + 'Status: open.')
checkOpenChart = checkOpenChart + 1
else:
print(' discharge time: ' + x[3] + 'Status: close.')
count = count + 1
if checkOpenChart == 0:
print('No open chart')
openChart = input('Do you want to create a new chart (y/n):')
if openChart == 'y':
print('Open chart')
return addChart(CONN, staff, patient)
else:
print('')
print(
'You have an open chart. If you want a new chart, close the open chart first'
)
try:
select = input(
"Please select a chart to continue(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
return patientSelect(CONN, staff)
selectChart = int(select) - 1
chart_id = rows[selectChart][0]
except:
        print('Invalid entry')
return patientChart(CONN, staff, patient)
if rows[selectChart][3] is None:
editAble = 1
else:
editAble = 0
return viewChart(CONN, chart_id, staff, patient, editAble)
def viewChart(CONN, chart_id, staff, patient, editAble):
c = CONN.cursor()
os.system('clear')
print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])
print('symptoms table')
c.execute(
"""SELECT *
FROM symptoms
WHERE hcno = ? AND chart_id = ?
ORDER BY obs_date;"""
, (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
print('diagnosis table')
c.execute(
"""SELECT *
FROM diagnoses
WHERE hcno = ? AND chart_id = ?
ORDER BY ddate;"""
, (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
print('medication table')
c.execute(
"""SELECT *
FROM medications
WHERE hcno = ? AND chart_id = ?
ORDER BY mdate;"""
, (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
if editAble == 0:
input('Press any key to return: ')
return patientChart(CONN, staff, patient)
if staff[1] == 'D':
return doctorChartMenu(CONN, patient, chart_id, staff)
elif staff[1] == 'N':
return nurseChartMenu(CONN, patient, chart_id, staff)
def doctorChartMenu(CONN, patient, chart_id, staff):
print('==========Chart Menu==========')
print('1. Add a symptoms')
print('2. Add a Diagnosis')
print('3. Add a medication')
print('4. Exit')
select = input('Please select an option to continue: ')
if select == '1':
print('Add symptoms')
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print('Add Diagnosis')
os.system('clear')
return addDiagnosis(CONN, patient, chart_id, staff)
elif select == '3':
print('Add medication')
os.system('clear')
return addMedication(CONN, patient, chart_id, staff)
elif select == '4':
return patientChart(CONN, staff, patient)
else:
print('Invalid entry, please try again')
return patientSelect(CONN, staff)
def nurseChartMenu(CONN, patient, chart_id, staff):
print('Chart Menu')
print('1. Add a symptoms')
print('2. close chart')
print('3. Exit')
    select = input('Please select an option to continue: ')
if select == '1':
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print('xx')
return closeChart(CONN, patient, chart_id, staff)
elif select == '3':
return patientChart(CONN, staff, patient)
else:
print('Invalid, please try again')
return patientSelect(CONN, staff)
def addSymptoms(CONN, patient, chart_id, staff):
c = CONN.cursor()
symptoms = input('Please enter a symptom: ')
while len(symptoms) == 0:
symptoms = input('Please enter a symptom: ')
c.execute(
"""INSERT INTO symptoms VALUES
(?,?,?,DateTime('now','localtime'),?);"""
, (patient[0], chart_id, staff[0], symptoms))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def addDiagnosis(CONN, patient, chart_id, staff):
c = CONN.cursor()
diagnosis = input('Please enter a diagnosis: ')
while len(diagnosis) == 0:
diagnosis = input('Please enter a diagnosis: ')
c.execute(
"""INSERT INTO diagnoses VALUES
(?,?,?,DateTime('now', 'localtime'),?);"""
, (patient[0], chart_id, staff[0], diagnosis))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def addMedication(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))
rows = c.fetchone()
patientAge = rows[2]
medicationName = input('Please enter a medication: ')
c.execute(
'SELECT sug_amount FROM dosage WHERE drug_name = ? AND age_group = ?;',
(medicationName, patientAge))
dosageAmount = c.fetchone()
    if dosageAmount is None:
        print('Drug name does not exist')
input('Press any key to return')
return viewChart(CONN, chart_id, staff, patient, 1)
c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (
patient[0],))
allergies = c.fetchone()
for x in allergies:
if x == medicationName:
            print('WARNING, the patient is allergic to ' + x)
c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (
medicationName,))
inferallergies = c.fetchall()
for x in inferallergies:
        print('Patient can be allergic to: ' + x[0])
amount = int(input('Medication amount: '))
if amount > dosageAmount[0]:
        print('Suggested amount: ' + str(dosageAmount[0]))
        confirm = input(
            'WARNING: Prescribed amount is greater than suggested amount. Confirm (y/n)'
)
if confirm == 'n':
return viewChart(CONN, chart_id, staff, patient, 1)
day = input('Medication length(in days): ')
c.execute(
"""INSERT INTO medications VALUES
(?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);"""
, (patient[0], chart_id, staff[0], '+' + day + ' day', amount,
medicationName))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
def closeChart(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))
rows = c.fetchone()
if rows[3] is None:
print('Close chart id ' + str(chart_id) + '?')
print('1. Yes.')
print('2. No.')
result = input('Please enter your choice: ')
if result == '1':
print('Closing chart.')
c.execute(
"""UPDATE charts SET edate = DateTime('now','localtime')
WHERE chart_id = ?;"""
, (chart_id,))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
elif result == '2':
return viewChart(CONN, chart_id, staff, patient, 1)
else:
print('Invalid')
return closeChart(CONN, patient, chart_id, staff)
def addPatient(CONN, staff):
c = CONN.cursor()
print('==========New Patient Record==========')
name = input('Please enter patient name: ')
hcno = input('Please enter patient HCNO: ')
try:
testHcno = int(hcno)
except:
print('Invalid HCNO, please try again')
return addPatient(CONN, staff)
age_group = input('Please enter age group: ')
address = input('Please enter address: ')
phone = input('Please enter phone number: ')
emg_phone = input('Please enter emergency phone number: ')
try:
c.execute("""INSERT INTO patients VALUES
(?,?,?,?,?,?);""",
(hcno, name, age_group, address, phone, emg_phone))
CONN.commit()
print('Patient record created.')
except:
print('Invalid entry, patient already exists')
def addChart(CONN, staff, patient):
c = CONN.cursor()
c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')
last_chart = c.fetchone()
if last_chart[0] is not None:
new_chart_id = int(last_chart[0]) + 1
else:
new_chart_id = '00001'
c.execute(
"""INSERT INTO charts VALUES
(?,?, DateTime('now','localtime'), ?);"""
, (new_chart_id, patient[0], None))
c.execute(
'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',
(patient[0],))
CONN.commit()
    print('A new chart has been created. Chart ID: ' + str(new_chart_id))
return patientChart(CONN, staff, patient)
<|reserved_special_token_1|>
import sqlite3
import os
#Search for a patient by name
#The doctor enters a name and the system finds patient names similar to it
#Once a match is found, the system outputs a list of matched patient names.
#Then, the doctor selects a patient to continue
def patientSelect(CONN, staff):
c = CONN.cursor()
print("Search for Patient")
select = input("Enter patient name(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
#return doctorMenu(CONN, staff[0])
return
c.execute('''SELECT hcno, name FROM patients WHERE name LIKE ?''', ('%'+select+'%',))
rows = c.fetchall()
if len(rows) == 0:
print("No patient found, please try again")
return patientSelect(CONN, staff)
count = 1
for x in rows:
print(str(count)+": patient hcno "+x[0]+"; patient name: "+x[1])
count = count + 1
try:
select = input("Please select your patient: ")
selectedPatient = int(select)-1
patientHCNO = rows[selectedPatient][0]
patientName = rows[selectedPatient][1]
patient = (patientHCNO, patientName)
except:
print("Invalid input, please try again")
return patientSelect(CONN, staff)
return patientChart(CONN, staff, patient)
#Output the tables related to the patient
#The doctor can select an open chart to continue
def patientChart(CONN, staff, patient):
c = CONN.cursor()
os.system('clear')
print("Patient HCNO: " + patient[0] + ", Patient Name: " + patient[1])
c.execute('''SELECT *
FROM charts
WHERE hcno = ?
ORDER BY adate
''', (patient[0],))
rows = c.fetchall()
count = 1
checkOpenChart = 0
for x in rows:
print(str(count)+": chart id: "+x[0]+"; patient hcno: "+ x[1] + "; admission time: "+x[2], end="")
if x[3] is None:
print(" discharge time: " + "Status: open.")
checkOpenChart = checkOpenChart + 1
else:
print(" discharge time: " + x[3] + "Status: close.")
        count = count + 1
if checkOpenChart == 0:
print("No open chart")
openChart = input("Do you want to create a new chart (y/n):")
if openChart == 'y':
print("Open chart")
return addChart(CONN, staff, patient)
else:
print("")
print("You have an open chart. If you want a new chart, close the open chart first")
try:
select = input("Please select a chart to continue(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
return patientSelect(CONN, staff)
selectChart = int(select)-1
chart_id = rows[selectChart][0]
except:
print("Invalid enry")
return patientChart(CONN, staff, patient)
if rows[selectChart][3] is None:
editAble = 1
else:
editAble = 0
return viewChart(CONN, chart_id, staff, patient, editAble)
#View a list of charts that related to the patient
def viewChart(CONN, chart_id, staff, patient, editAble):
c = CONN.cursor()
os.system('clear')
print("Patient HCNO: " + patient[0] + ", Patient Name: " + patient[1])
print("symptoms table")
c.execute('''SELECT *
FROM symptoms
WHERE hcno = ? AND chart_id = ?
ORDER BY obs_date;''', (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
print("diagnosis table")
c.execute('''SELECT *
FROM diagnoses
WHERE hcno = ? AND chart_id = ?
ORDER BY ddate;''', (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
print("medication table")
c.execute('''SELECT *
FROM medications
WHERE hcno = ? AND chart_id = ?
ORDER BY mdate;''', (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
if editAble == 0:
input("Press any key to return: ")
return patientChart(CONN, staff, patient)
if staff[1] == 'D':
return doctorChartMenu(CONN, patient, chart_id, staff)
elif staff[1] == 'N':
return nurseChartMenu(CONN, patient, chart_id, staff)
#If the chart is open, able to edit the chart
def doctorChartMenu(CONN, patient, chart_id, staff):
print("==========Chart Menu==========")
print("1. Add a symptoms")
print("2. Add a Diagnosis")
print("3. Add a medication")
print("4. Exit")
select = input("Please select an option to continue: ")
if select == '1':
print("Add symptoms")
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print("Add Diagnosis")
os.system('clear')
return addDiagnosis(CONN, patient, chart_id, staff)
elif select == '3':
print("Add medication")
os.system('clear')
return addMedication(CONN, patient, chart_id, staff)
elif select == '4':
return patientChart(CONN, staff, patient)
else:
print("Invalid entry, please try again")
return patientSelect(CONN, staff)
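#Nurse version of the chart menu
#A nurse can add symptoms or close the chart, but cannot add diagnoses or medications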
def nurseChartMenu(CONN, patient, chart_id, staff):
print("Chart Menu")
print("1. Add a symptoms")
print("2. close chart")
print("3. Exit")
select = input("Please select an option to cintinue: ")
if select == '1':
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print("xx")
return closeChart(CONN, patient, chart_id, staff)
elif select == '3':
return patientChart(CONN, staff, patient)
else:
print("Invalid, please try again")
return patientSelect(CONN, staff)
#Insert a symptom
#Ask the doctor for a symptom name
#Observation date will be the current time
# The function returns to viewChart()
def addSymptoms(CONN, patient, chart_id, staff):
c = CONN.cursor()
symptoms = input("Please enter a symptom: ")
while len(symptoms) == 0:
symptoms = input("Please enter a symptom: ")
c.execute('''INSERT INTO symptoms VALUES
(?,?,?,DateTime('now','localtime'),?);''',(patient[0], chart_id, staff[0], symptoms))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
# Insert a diagnosis
# Will prompt for a diagnosis name
# Observation date will be the current time
# Return to viewChart() when finished
def addDiagnosis(CONN, patient, chart_id, staff):
#Insert a diagnosis
c = CONN.cursor()
diagnosis = input("Please enter a diagnosis: ")
while len(diagnosis) == 0:
diagnosis = input("Please enter a diagnosis: ")
c.execute('''INSERT INTO diagnoses VALUES
(?,?,?,DateTime('now', 'localtime'),?);''',(patient[0], chart_id, staff[0], diagnosis))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
# Insert a medication
# Will prompt for a medication name
# Start date will be today
# Return to viewChart() when finished
def addMedication(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute("SELECT * FROM patients WHERE hcno = ?;",(patient[0],))
rows = c.fetchone()
patientAge = rows[2]
#Get Medication Name, if not exist in database, return to previous page
medicationName = input("Please enter a medication: ")
c.execute("SELECT sug_amount FROM dosage WHERE drug_name = ? AND age_group = ?;", (medicationName,patientAge))
dosageAmount = c.fetchone()
    if dosageAmount is None:
        print("Drug name does not exist")
input("Press any key to return")
return viewChart(CONN, chart_id, staff, patient, 1)
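    #Warn if the patient has a reported allergy or a possible inferred allergy to this drug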
c.execute('''SELECT drug_name FROM reportedallergies WHERE hcno = ?;''', (patient[0],))
allergies = c.fetchone()
for x in allergies:
if x == medicationName:
print("WARNING, the patinet is allergic to "+ x)
c.execute('''SELECT canbe_alg FROM inferredallergies WHERE alg = ?;''',(medicationName,))
inferallergies = c.fetchall()
for x in inferallergies:
print("Patinet can be allergic to: " + x[0])
    # Get prescription amount; if larger than the suggested amount, display a warning message
amount = int(input("Medication amount: "))
if amount > dosageAmount[0]:
print("Suggest Amount: "+ str(dosageAmount[0]))
confirm = input("WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)")
if confirm == 'n':
return viewChart(CONN, chart_id, staff, patient, 1)
#Get medication period
day = input("Medication length(in days): ")
c.execute('''INSERT INTO medications VALUES
(?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);''',(patient[0], chart_id, staff[0], '+'+day+' day', amount, medicationName))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
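#Close an open chart
#Asks for confirmation, then sets the discharge date (edate) to the current time
#Return to viewChart() when finished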
def closeChart(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute("SELECT * FROM charts WHERE chart_id = ?;", (chart_id,))
rows = c.fetchone()
if rows[3] is None:
print("Close chart id "+str(chart_id)+"?")
print("1. Yes.")
print("2. No.")
result = input("Please enter your choice: ")
if result == '1':
print("Closing chart.")
c.execute('''UPDATE charts SET edate = DateTime('now','localtime')
WHERE chart_id = ?;''', (chart_id,))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
elif result == '2':
return viewChart(CONN, chart_id, staff, patient, 1)
else:
print("Invalid")
return closeChart(CONN, patient, chart_id, staff)
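#Create a new patient record
#HCNO must be an integer; a duplicate HCNO makes the INSERT fail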
def addPatient(CONN, staff):
c = CONN.cursor()
print("==========New Patient Record==========")
name = input("Please enter patient name: ")
hcno = input("Please enter patient HCNO: ")
try:
testHcno = int(hcno)
except:
print("Invalid HCNO, please try again")
return addPatient(CONN, staff)
age_group = input("Please enter age group: ")
address = input("Please enter address: ")
phone = input("Please enter phone number: ")
emg_phone = input("Please enter emergency phone number: ")
try:
c.execute('''INSERT INTO patients VALUES
(?,?,?,?,?,?);''',(hcno, name, age_group, address, phone, emg_phone))
CONN.commit()
print("Patient record created.")
except:
print("Invalid entry, patient already exists")
def addChart(CONN, staff, patient):
c = CONN.cursor()
#phcno = input("Please enter patient health care #: ")
c.execute("SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;")
last_chart = c.fetchone()
if last_chart[0] is not None:
new_chart_id = int(last_chart[0])+1
else:
new_chart_id = '00001'
c.execute('''INSERT INTO charts VALUES
(?,?, DateTime('now','localtime'), ?);''', (new_chart_id, patient[0], None))
c.execute("SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;", (patient[0],))
CONN.commit()
print("A new chart had been create. Chart ID: "+ str(new_chart_id))
return patientChart(CONN, staff, patient)
|
flexible
|
{
"blob_id": "b3b4d27b60c71cbd979ad4887fa80408665ea1ac",
"index": 2853,
"step-1": "<mask token>\n\n\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print('Search for Patient')\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return\n c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +\n select + '%',))\n rows = c.fetchall()\n if len(rows) == 0:\n print('No patient found, please try again')\n return patientSelect(CONN, staff)\n count = 1\n for x in rows:\n print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]\n )\n count = count + 1\n try:\n select = input('Please select your patient: ')\n selectedPatient = int(select) - 1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = patientHCNO, patientName\n except:\n print('Invalid input, please try again')\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\n<mask token>\n\n\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print('==========Chart Menu==========')\n print('1. Add a symptoms')\n print('2. Add a Diagnosis')\n print('3. Add a medication')\n print('4. Exit')\n select = input('Please select an option to continue: ')\n if select == '1':\n print('Add symptoms')\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('Add Diagnosis')\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print('Add medication')\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid entry, please try again')\n return patientSelect(CONN, staff)\n\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print('Chart Menu')\n print('1. Add a symptoms')\n print('2. close chart')\n print('3. Exit')\n select = input('Please select an option to cintinue: ')\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('xx')\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid, please try again')\n return patientSelect(CONN, staff)\n\n\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input('Please enter a symptom: ')\n while len(symptoms) == 0:\n symptoms = input('Please enter a symptom: ')\n c.execute(\n \"\"\"INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\n<mask token>\n\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))\n rows = c.fetchone()\n if rows[3] is None:\n print('Close chart id ' + str(chart_id) + '?')\n print('1. Yes.')\n print('2. 
No.')\n result = input('Please enter your choice: ')\n if result == '1':\n print('Closing chart.')\n c.execute(\n \"\"\"UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;\"\"\"\n , (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print('Invalid')\n return closeChart(CONN, patient, chart_id, staff)\n\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print('==========New Patient Record==========')\n name = input('Please enter patient name: ')\n hcno = input('Please enter patient HCNO: ')\n try:\n testHcno = int(hcno)\n except:\n print('Invalid HCNO, please try again')\n return addPatient(CONN, staff)\n age_group = input('Please enter age group: ')\n address = input('Please enter address: ')\n phone = input('Please enter phone number: ')\n emg_phone = input('Please enter emergency phone number: ')\n try:\n c.execute(\"\"\"INSERT INTO patients VALUES\n (?,?,?,?,?,?);\"\"\",\n (hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print('Patient record created.')\n except:\n print('Invalid entry, patient already exists')\n\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0]) + 1\n else:\n new_chart_id = '00001'\n c.execute(\n \"\"\"INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);\"\"\"\n , (new_chart_id, patient[0], None))\n c.execute(\n 'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',\n (patient[0],))\n CONN.commit()\n print('A new chart had been create. Chart ID: ' + str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-2": "<mask token>\n\n\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print('Search for Patient')\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return\n c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +\n select + '%',))\n rows = c.fetchall()\n if len(rows) == 0:\n print('No patient found, please try again')\n return patientSelect(CONN, staff)\n count = 1\n for x in rows:\n print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]\n )\n count = count + 1\n try:\n select = input('Please select your patient: ')\n selectedPatient = int(select) - 1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = patientHCNO, patientName\n except:\n print('Invalid input, please try again')\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\n<mask token>\n\n\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print('==========Chart Menu==========')\n print('1. Add a symptoms')\n print('2. Add a Diagnosis')\n print('3. Add a medication')\n print('4. Exit')\n select = input('Please select an option to continue: ')\n if select == '1':\n print('Add symptoms')\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('Add Diagnosis')\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print('Add medication')\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid entry, please try again')\n return patientSelect(CONN, staff)\n\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print('Chart Menu')\n print('1. Add a symptoms')\n print('2. close chart')\n print('3. Exit')\n select = input('Please select an option to cintinue: ')\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('xx')\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid, please try again')\n return patientSelect(CONN, staff)\n\n\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input('Please enter a symptom: ')\n while len(symptoms) == 0:\n symptoms = input('Please enter a symptom: ')\n c.execute(\n \"\"\"INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addDiagnosis(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n diagnosis = input('Please enter a diagnosis: ')\n while len(diagnosis) == 0:\n diagnosis = input('Please enter a diagnosis: ')\n c.execute(\n \"\"\"INSERT INTO diagnoses VALUES\n (?,?,?,DateTime('now', 'localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], diagnosis))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addMedication(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))\n rows = c.fetchone()\n patientAge = rows[2]\n medicationName = input('Please enter a medication: ')\n c.execute(\n 'SELECT sug_amount FROM dosage WHERE drug_name = ? 
AND age_group = ?;',\n (medicationName, patientAge))\n dosageAmount = c.fetchone()\n if dosageAmount == None:\n print('Drug Name not exist')\n input('Press any key to return')\n return viewChart(CONN, chart_id, staff, patient, 1)\n c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (\n patient[0],))\n allergies = c.fetchone()\n for x in allergies:\n if x == medicationName:\n print('WARNING, the patinet is allergic to ' + x)\n c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (\n medicationName,))\n inferallergies = c.fetchall()\n for x in inferallergies:\n print('Patinet can be allergic to: ' + x[0])\n amount = int(input('Medication amount: '))\n if amount > dosageAmount[0]:\n print('Suggest Amount: ' + str(dosageAmount[0]))\n confirm = input(\n 'WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)'\n )\n if confirm == 'n':\n return viewChart(CONN, chart_id, staff, patient, 1)\n day = input('Medication length(in days): ')\n c.execute(\n \"\"\"INSERT INTO medications VALUES\n (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);\"\"\"\n , (patient[0], chart_id, staff[0], '+' + day + ' day', amount,\n medicationName))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))\n rows = c.fetchone()\n if rows[3] is None:\n print('Close chart id ' + str(chart_id) + '?')\n print('1. Yes.')\n print('2. No.')\n result = input('Please enter your choice: ')\n if result == '1':\n print('Closing chart.')\n c.execute(\n \"\"\"UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;\"\"\"\n , (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print('Invalid')\n return closeChart(CONN, patient, chart_id, staff)\n\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print('==========New Patient Record==========')\n name = input('Please enter patient name: ')\n hcno = input('Please enter patient HCNO: ')\n try:\n testHcno = int(hcno)\n except:\n print('Invalid HCNO, please try again')\n return addPatient(CONN, staff)\n age_group = input('Please enter age group: ')\n address = input('Please enter address: ')\n phone = input('Please enter phone number: ')\n emg_phone = input('Please enter emergency phone number: ')\n try:\n c.execute(\"\"\"INSERT INTO patients VALUES\n (?,?,?,?,?,?);\"\"\",\n (hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print('Patient record created.')\n except:\n print('Invalid entry, patient already exists')\n\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0]) + 1\n else:\n new_chart_id = '00001'\n c.execute(\n \"\"\"INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);\"\"\"\n , (new_chart_id, patient[0], None))\n c.execute(\n 'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',\n (patient[0],))\n CONN.commit()\n print('A new chart had been create. Chart ID: ' + str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-3": "<mask token>\n\n\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print('Search for Patient')\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return\n c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +\n select + '%',))\n rows = c.fetchall()\n if len(rows) == 0:\n print('No patient found, please try again')\n return patientSelect(CONN, staff)\n count = 1\n for x in rows:\n print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]\n )\n count = count + 1\n try:\n select = input('Please select your patient: ')\n selectedPatient = int(select) - 1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = patientHCNO, patientName\n except:\n print('Invalid input, please try again')\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\ndef patientChart(CONN, staff, patient):\n c = CONN.cursor()\n os.system('clear')\n print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])\n c.execute(\n \"\"\"SELECT * \n FROM charts \n WHERE hcno = ?\n ORDER BY adate\n \"\"\"\n , (patient[0],))\n rows = c.fetchall()\n count = 1\n checkOpenChart = 0\n for x in rows:\n print(str(count) + ': chart id: ' + x[0] + '; patient hcno: ' + x[1\n ] + '; admission time: ' + x[2], end='')\n if x[3] is None:\n print(' discharge time: ' + 'Status: open.')\n checkOpenChart = checkOpenChart + 1\n else:\n print(' discharge time: ' + x[3] + 'Status: close.')\n count = count + 1\n if checkOpenChart == 0:\n print('No open chart')\n openChart = input('Do you want to create a new chart (y/n):')\n if openChart == 'y':\n print('Open chart')\n return addChart(CONN, staff, patient)\n else:\n print('')\n print(\n 'You have an open chart. If you want a new chart, close the open chart first'\n )\n try:\n select = input(\n \"Please select a chart to continue(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return patientSelect(CONN, staff)\n selectChart = int(select) - 1\n chart_id = rows[selectChart][0]\n except:\n print('Invalid enry')\n return patientChart(CONN, staff, patient)\n if rows[selectChart][3] is None:\n editAble = 1\n else:\n editAble = 0\n return viewChart(CONN, chart_id, staff, patient, editAble)\n\n\ndef viewChart(CONN, chart_id, staff, patient, editAble):\n c = CONN.cursor()\n os.system('clear')\n print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])\n print('symptoms table')\n c.execute(\n \"\"\"SELECT * \n FROM symptoms \n WHERE hcno = ? AND chart_id = ?\n ORDER BY obs_date;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print('diagnosis table')\n c.execute(\n \"\"\"SELECT * \n FROM diagnoses \n WHERE hcno = ? AND chart_id = ?\n ORDER BY ddate;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print('medication table')\n c.execute(\n \"\"\"SELECT * \n FROM medications \n WHERE hcno = ? AND chart_id = ?\n ORDER BY mdate;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n if editAble == 0:\n input('Press any key to return: ')\n return patientChart(CONN, staff, patient)\n if staff[1] == 'D':\n return doctorChartMenu(CONN, patient, chart_id, staff)\n elif staff[1] == 'N':\n return nurseChartMenu(CONN, patient, chart_id, staff)\n\n\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print('==========Chart Menu==========')\n print('1. Add a symptoms')\n print('2. 
Add a Diagnosis')\n print('3. Add a medication')\n print('4. Exit')\n select = input('Please select an option to continue: ')\n if select == '1':\n print('Add symptoms')\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('Add Diagnosis')\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print('Add medication')\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid entry, please try again')\n return patientSelect(CONN, staff)\n\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print('Chart Menu')\n print('1. Add a symptoms')\n print('2. close chart')\n print('3. Exit')\n select = input('Please select an option to cintinue: ')\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('xx')\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid, please try again')\n return patientSelect(CONN, staff)\n\n\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input('Please enter a symptom: ')\n while len(symptoms) == 0:\n symptoms = input('Please enter a symptom: ')\n c.execute(\n \"\"\"INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addDiagnosis(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n diagnosis = input('Please enter a diagnosis: ')\n while len(diagnosis) == 0:\n diagnosis = input('Please enter a diagnosis: ')\n c.execute(\n \"\"\"INSERT INTO diagnoses VALUES\n (?,?,?,DateTime('now', 'localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], diagnosis))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addMedication(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))\n rows = c.fetchone()\n patientAge = rows[2]\n medicationName = input('Please enter a medication: ')\n c.execute(\n 'SELECT sug_amount FROM dosage WHERE drug_name = ? 
AND age_group = ?;',\n (medicationName, patientAge))\n dosageAmount = c.fetchone()\n if dosageAmount == None:\n print('Drug Name not exist')\n input('Press any key to return')\n return viewChart(CONN, chart_id, staff, patient, 1)\n c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (\n patient[0],))\n allergies = c.fetchone()\n for x in allergies:\n if x == medicationName:\n print('WARNING, the patinet is allergic to ' + x)\n c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (\n medicationName,))\n inferallergies = c.fetchall()\n for x in inferallergies:\n print('Patinet can be allergic to: ' + x[0])\n amount = int(input('Medication amount: '))\n if amount > dosageAmount[0]:\n print('Suggest Amount: ' + str(dosageAmount[0]))\n confirm = input(\n 'WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)'\n )\n if confirm == 'n':\n return viewChart(CONN, chart_id, staff, patient, 1)\n day = input('Medication length(in days): ')\n c.execute(\n \"\"\"INSERT INTO medications VALUES\n (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);\"\"\"\n , (patient[0], chart_id, staff[0], '+' + day + ' day', amount,\n medicationName))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))\n rows = c.fetchone()\n if rows[3] is None:\n print('Close chart id ' + str(chart_id) + '?')\n print('1. Yes.')\n print('2. No.')\n result = input('Please enter your choice: ')\n if result == '1':\n print('Closing chart.')\n c.execute(\n \"\"\"UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;\"\"\"\n , (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print('Invalid')\n return closeChart(CONN, patient, chart_id, staff)\n\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print('==========New Patient Record==========')\n name = input('Please enter patient name: ')\n hcno = input('Please enter patient HCNO: ')\n try:\n testHcno = int(hcno)\n except:\n print('Invalid HCNO, please try again')\n return addPatient(CONN, staff)\n age_group = input('Please enter age group: ')\n address = input('Please enter address: ')\n phone = input('Please enter phone number: ')\n emg_phone = input('Please enter emergency phone number: ')\n try:\n c.execute(\"\"\"INSERT INTO patients VALUES\n (?,?,?,?,?,?);\"\"\",\n (hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print('Patient record created.')\n except:\n print('Invalid entry, patient already exists')\n\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0]) + 1\n else:\n new_chart_id = '00001'\n c.execute(\n \"\"\"INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);\"\"\"\n , (new_chart_id, patient[0], None))\n c.execute(\n 'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',\n (patient[0],))\n CONN.commit()\n print('A new chart had been create. Chart ID: ' + str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-4": "import sqlite3\nimport os\n\n\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print('Search for Patient')\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return\n c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +\n select + '%',))\n rows = c.fetchall()\n if len(rows) == 0:\n print('No patient found, please try again')\n return patientSelect(CONN, staff)\n count = 1\n for x in rows:\n print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]\n )\n count = count + 1\n try:\n select = input('Please select your patient: ')\n selectedPatient = int(select) - 1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = patientHCNO, patientName\n except:\n print('Invalid input, please try again')\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\ndef patientChart(CONN, staff, patient):\n c = CONN.cursor()\n os.system('clear')\n print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])\n c.execute(\n \"\"\"SELECT * \n FROM charts \n WHERE hcno = ?\n ORDER BY adate\n \"\"\"\n , (patient[0],))\n rows = c.fetchall()\n count = 1\n checkOpenChart = 0\n for x in rows:\n print(str(count) + ': chart id: ' + x[0] + '; patient hcno: ' + x[1\n ] + '; admission time: ' + x[2], end='')\n if x[3] is None:\n print(' discharge time: ' + 'Status: open.')\n checkOpenChart = checkOpenChart + 1\n else:\n print(' discharge time: ' + x[3] + 'Status: close.')\n count = count + 1\n if checkOpenChart == 0:\n print('No open chart')\n openChart = input('Do you want to create a new chart (y/n):')\n if openChart == 'y':\n print('Open chart')\n return addChart(CONN, staff, patient)\n else:\n print('')\n print(\n 'You have an open chart. If you want a new chart, close the open chart first'\n )\n try:\n select = input(\n \"Please select a chart to continue(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return patientSelect(CONN, staff)\n selectChart = int(select) - 1\n chart_id = rows[selectChart][0]\n except:\n print('Invalid enry')\n return patientChart(CONN, staff, patient)\n if rows[selectChart][3] is None:\n editAble = 1\n else:\n editAble = 0\n return viewChart(CONN, chart_id, staff, patient, editAble)\n\n\ndef viewChart(CONN, chart_id, staff, patient, editAble):\n c = CONN.cursor()\n os.system('clear')\n print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])\n print('symptoms table')\n c.execute(\n \"\"\"SELECT * \n FROM symptoms \n WHERE hcno = ? AND chart_id = ?\n ORDER BY obs_date;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print('diagnosis table')\n c.execute(\n \"\"\"SELECT * \n FROM diagnoses \n WHERE hcno = ? AND chart_id = ?\n ORDER BY ddate;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print('medication table')\n c.execute(\n \"\"\"SELECT * \n FROM medications \n WHERE hcno = ? AND chart_id = ?\n ORDER BY mdate;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n if editAble == 0:\n input('Press any key to return: ')\n return patientChart(CONN, staff, patient)\n if staff[1] == 'D':\n return doctorChartMenu(CONN, patient, chart_id, staff)\n elif staff[1] == 'N':\n return nurseChartMenu(CONN, patient, chart_id, staff)\n\n\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print('==========Chart Menu==========')\n print('1. Add a symptoms')\n print('2. 
Add a Diagnosis')\n print('3. Add a medication')\n print('4. Exit')\n select = input('Please select an option to continue: ')\n if select == '1':\n print('Add symptoms')\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('Add Diagnosis')\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print('Add medication')\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid entry, please try again')\n return patientSelect(CONN, staff)\n\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print('Chart Menu')\n print('1. Add a symptoms')\n print('2. close chart')\n print('3. Exit')\n select = input('Please select an option to cintinue: ')\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('xx')\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid, please try again')\n return patientSelect(CONN, staff)\n\n\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input('Please enter a symptom: ')\n while len(symptoms) == 0:\n symptoms = input('Please enter a symptom: ')\n c.execute(\n \"\"\"INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addDiagnosis(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n diagnosis = input('Please enter a diagnosis: ')\n while len(diagnosis) == 0:\n diagnosis = input('Please enter a diagnosis: ')\n c.execute(\n \"\"\"INSERT INTO diagnoses VALUES\n (?,?,?,DateTime('now', 'localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], diagnosis))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addMedication(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))\n rows = c.fetchone()\n patientAge = rows[2]\n medicationName = input('Please enter a medication: ')\n c.execute(\n 'SELECT sug_amount FROM dosage WHERE drug_name = ? 
AND age_group = ?;',\n (medicationName, patientAge))\n dosageAmount = c.fetchone()\n if dosageAmount == None:\n print('Drug Name not exist')\n input('Press any key to return')\n return viewChart(CONN, chart_id, staff, patient, 1)\n c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (\n patient[0],))\n allergies = c.fetchone()\n for x in allergies:\n if x == medicationName:\n print('WARNING, the patinet is allergic to ' + x)\n c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (\n medicationName,))\n inferallergies = c.fetchall()\n for x in inferallergies:\n print('Patinet can be allergic to: ' + x[0])\n amount = int(input('Medication amount: '))\n if amount > dosageAmount[0]:\n print('Suggest Amount: ' + str(dosageAmount[0]))\n confirm = input(\n 'WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)'\n )\n if confirm == 'n':\n return viewChart(CONN, chart_id, staff, patient, 1)\n day = input('Medication length(in days): ')\n c.execute(\n \"\"\"INSERT INTO medications VALUES\n (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);\"\"\"\n , (patient[0], chart_id, staff[0], '+' + day + ' day', amount,\n medicationName))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))\n rows = c.fetchone()\n if rows[3] is None:\n print('Close chart id ' + str(chart_id) + '?')\n print('1. Yes.')\n print('2. No.')\n result = input('Please enter your choice: ')\n if result == '1':\n print('Closing chart.')\n c.execute(\n \"\"\"UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;\"\"\"\n , (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print('Invalid')\n return closeChart(CONN, patient, chart_id, staff)\n\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print('==========New Patient Record==========')\n name = input('Please enter patient name: ')\n hcno = input('Please enter patient HCNO: ')\n try:\n testHcno = int(hcno)\n except:\n print('Invalid HCNO, please try again')\n return addPatient(CONN, staff)\n age_group = input('Please enter age group: ')\n address = input('Please enter address: ')\n phone = input('Please enter phone number: ')\n emg_phone = input('Please enter emergency phone number: ')\n try:\n c.execute(\"\"\"INSERT INTO patients VALUES\n (?,?,?,?,?,?);\"\"\",\n (hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print('Patient record created.')\n except:\n print('Invalid entry, patient already exists')\n\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0]) + 1\n else:\n new_chart_id = '00001'\n c.execute(\n \"\"\"INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);\"\"\"\n , (new_chart_id, patient[0], None))\n c.execute(\n 'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',\n (patient[0],))\n CONN.commit()\n print('A new chart had been create. Chart ID: ' + str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-5": "import sqlite3\nimport os\n\n#Search for a patient name\n#Every doctor enter a name, it will find the patinet name that is similar to the patient name\n#Once a match is found, the system will output a list of matched patient names.\n#Then, the doctor select the patient to continue\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print(\"Search for Patient\")\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n #return doctorMenu(CONN, staff[0])\n return\n c.execute('''SELECT hcno, name FROM patients WHERE name LIKE ?''', ('%'+select+'%',))\n rows = c.fetchall()\n if len(rows) == 0: \n print(\"No patient found, please try again\")\n return patientSelect(CONN, staff)\n\n count = 1\n for x in rows:\n print(str(count)+\": patient hcno \"+x[0]+\"; patient name: \"+x[1])\n count = count + 1\n\n try:\n select = input(\"Please select your patient: \")\n selectedPatient = int(select)-1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = (patientHCNO, patientName)\n except:\n print(\"Invalid input, please try again\")\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\n#Output the tables related to the patient\n#The doctor can select an open chart to continue\ndef patientChart(CONN, staff, patient):\n c = CONN.cursor()\n os.system('clear')\n\n print(\"Patient HCNO: \" + patient[0] + \", Patient Name: \" + patient[1])\n c.execute('''SELECT * \n FROM charts \n WHERE hcno = ?\n ORDER BY adate\n ''', (patient[0],))\n rows = c.fetchall()\n count = 1\n checkOpenChart = 0\n for x in rows:\n print(str(count)+\": chart id: \"+x[0]+\"; patient hcno: \"+ x[1] + \"; admission time: \"+x[2], end=\"\")\n if x[3] is None:\n print(\" discharge time: \" + \"Status: open.\")\n checkOpenChart = checkOpenChart + 1\n else:\n print(\" discharge time: \" + x[3] + \"Status: close.\")\n count = count + 1;\n if checkOpenChart == 0:\n print(\"No open chart\")\n openChart = input(\"Do you want to create a new chart (y/n):\")\n if openChart == 'y':\n print(\"Open chart\")\n return addChart(CONN, staff, patient)\n else:\n print(\"\")\n print(\"You have an open chart. If you want a new chart, close the open chart first\")\n\n try:\n select = input(\"Please select a chart to continue(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return patientSelect(CONN, staff)\n selectChart = int(select)-1\n chart_id = rows[selectChart][0] \n except:\n print(\"Invalid enry\")\n return patientChart(CONN, staff, patient)\n\n if rows[selectChart][3] is None:\n editAble = 1\n else:\n editAble = 0\n\n return viewChart(CONN, chart_id, staff, patient, editAble)\n\n#View a list of charts that related to the patient\ndef viewChart(CONN, chart_id, staff, patient, editAble):\n c = CONN.cursor()\n os.system('clear')\n print(\"Patient HCNO: \" + patient[0] + \", Patient Name: \" + patient[1])\n print(\"symptoms table\")\n c.execute('''SELECT * \n FROM symptoms \n WHERE hcno = ? AND chart_id = ?\n ORDER BY obs_date;''', (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print(\"diagnosis table\")\n c.execute('''SELECT * \n FROM diagnoses \n WHERE hcno = ? AND chart_id = ?\n ORDER BY ddate;''', (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print(\"medication table\")\n c.execute('''SELECT * \n FROM medications \n WHERE hcno = ? 
AND chart_id = ?\n ORDER BY mdate;''', (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n\n if editAble == 0:\n input(\"Press any key to return: \")\n return patientChart(CONN, staff, patient)\n if staff[1] == 'D':\n return doctorChartMenu(CONN, patient, chart_id, staff)\n elif staff[1] == 'N':\n return nurseChartMenu(CONN, patient, chart_id, staff) \n\n#If the chart is open, able to edit the chart\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print(\"==========Chart Menu==========\")\n print(\"1. Add a symptoms\")\n print(\"2. Add a Diagnosis\")\n print(\"3. Add a medication\")\n print(\"4. Exit\")\n select = input(\"Please select an option to continue: \")\n if select == '1':\n print(\"Add symptoms\")\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print(\"Add Diagnosis\")\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print(\"Add medication\")\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print(\"Invalid entry, please try again\")\n return patientSelect(CONN, staff)\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print(\"Chart Menu\")\n print(\"1. Add a symptoms\")\n print(\"2. close chart\")\n print(\"3. Exit\")\n select = input(\"Please select an option to cintinue: \")\n\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print(\"xx\")\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print(\"Invalid, please try again\")\n return patientSelect(CONN, staff)\n#Insert a symptom\n#Ask doctor for symptom name\n#Observer date will be current time\n# The function will return to viewChart()\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input(\"Please enter a symptom: \")\n while len(symptoms) == 0:\n symptoms = input(\"Please enter a symptom: \")\n \n c.execute('''INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);''',(patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n# Insert a diagnosis\n# Will prompt for a diagnose name\n# Observe date will be current time\n# Return to viewChart() after finish\ndef addDiagnosis(CONN, patient, chart_id, staff):\n #Insert a diagnosis\n c = CONN.cursor()\n diagnosis = input(\"Please enter a diagnosis: \")\n while len(diagnosis) == 0:\n diagnosis = input(\"Please enter a diagnosis: \")\n \n c.execute('''INSERT INTO diagnoses VALUES\n (?,?,?,DateTime('now', 'localtime'),?);''',(patient[0], chart_id, staff[0], diagnosis))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n# Insert a medication\n# Will prompt for a medication name\n# start date will be today\n# Return to viewChart() after finish\ndef addMedication(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute(\"SELECT * FROM patients WHERE hcno = ?;\",(patient[0],))\n rows = c.fetchone()\n patientAge = rows[2]\n\n #Get Medication Name, if not exist in database, return to previous page\n medicationName = input(\"Please enter a medication: \")\n c.execute(\"SELECT sug_amount FROM dosage WHERE drug_name = ? 
AND age_group = ?;\", (medicationName,patientAge))\n dosageAmount = c.fetchone()\n if dosageAmount == None:\n print(\"Drug Name not exist\")\n input(\"Press any key to return\")\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n c.execute('''SELECT drug_name FROM reportedallergies WHERE hcno = ?;''', (patient[0],))\n allergies = c.fetchone()\n for x in allergies:\n if x == medicationName:\n print(\"WARNING, the patinet is allergic to \"+ x)\n c.execute('''SELECT canbe_alg FROM inferredallergies WHERE alg = ?;''',(medicationName,))\n \n inferallergies = c.fetchall()\n for x in inferallergies:\n print(\"Patinet can be allergic to: \" + x[0])\n\n # Get prescripbtion amount, if larger than suggest amount, display warning message\n amount = int(input(\"Medication amount: \"))\n if amount > dosageAmount[0]:\n print(\"Suggest Amount: \"+ str(dosageAmount[0]))\n confirm = input(\"WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)\")\n\n if confirm == 'n':\n return viewChart(CONN, chart_id, staff, patient, 1)\n \n #Get medication period\n day = input(\"Medication length(in days): \")\n\n c.execute('''INSERT INTO medications VALUES\n (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);''',(patient[0], chart_id, staff[0], '+'+day+' day', amount, medicationName))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute(\"SELECT * FROM charts WHERE chart_id = ?;\", (chart_id,))\n rows = c.fetchone()\n\n if rows[3] is None:\n print(\"Close chart id \"+str(chart_id)+\"?\")\n print(\"1. Yes.\")\n print(\"2. No.\")\n result = input(\"Please enter your choice: \")\n if result == '1':\n print(\"Closing chart.\")\n c.execute('''UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;''', (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print(\"Invalid\")\n return closeChart(CONN, patient, chart_id, staff)\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print(\"==========New Patient Record==========\")\n\n name = input(\"Please enter patient name: \")\n \n hcno = input(\"Please enter patient HCNO: \")\n try:\n testHcno = int(hcno)\n except:\n print(\"Invalid HCNO, please try again\")\n return addPatient(CONN, staff)\n age_group = input(\"Please enter age group: \")\n address = input(\"Please enter address: \")\n phone = input(\"Please enter phone number: \")\n emg_phone = input(\"Please enter emergency phone number: \")\n try:\n c.execute('''INSERT INTO patients VALUES\n (?,?,?,?,?,?);''',(hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print(\"Patient record created.\")\n except:\n print(\"Invalid entry, patient already exists\")\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n #phcno = input(\"Please enter patient health care #: \")\n c.execute(\"SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;\")\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0])+1\n else:\n new_chart_id = '00001'\n\n c.execute('''INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);''', (new_chart_id, patient[0], None))\n c.execute(\"SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;\", (patient[0],))\n CONN.commit()\n\n print(\"A new chart had been create. 
Chart ID: \"+ str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-ids": [
7,
9,
11,
12,
13
]
}
|
[
7,
9,
11,
12,
13
] |
import random
import HardMode
import EasyMode
#Intro function, gets user input of game start, instructions, and game mode
def introduction():
    like_to_play = int(input("Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) "))
    #like_to_play = int(like_to_play)
    #need to set y/n variables instead of numeric: flow control
    if like_to_play == 1:
easy_or_hard = input("Easy (1) or hard (2)? ")
easy_or_hard = int(easy_or_hard)
if easy_or_hard == 1:
EasyMode.play_game_easy()
elif easy_or_hard == 2:
HardMode.play_game_hard()
else:
print("Invalid option!")
else:
print("Goodbye!")
introduction()
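
The script above imports HardMode and EasyMode, but neither module is included in this record. Below is a minimal sketch of what EasyMode.play_game_easy() could look like, assuming a single round against a uniformly random computer choice; only the module and function names come from the calls above, the game logic is an assumption:

# EasyMode.py (hypothetical sketch; only the names EasyMode and
# play_game_easy come from the record above, the rest is assumed)
import random

BEATS = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}

def play_game_easy():
    player = input('Choose rock, paper, or scissors: ').strip().lower()
    if player not in BEATS:
        print('Invalid option!')
        return
    computer = random.choice(list(BEATS))
    print('Computer chose:', computer)
    if player == computer:
        print('Tie!')
    elif BEATS[player] == computer:
        print('You win!')
    else:
        print('You lose!')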
|
normal
|
{
"blob_id": "31246a2e022f3c5b0ce68bb06422307439cbd9b6",
"index": 4272,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef introduction():\n like_to_play = int(input(\n 'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '\n ))\n if like_to_play == 1:\n easy_or_hard = input('Easy (1) or hard (2)? ')\n easy_or_hard = int(easy_or_hard)\n if easy_or_hard == 1:\n EasyMode.play_game_easy()\n elif easy_or_hard == 2:\n HardMode.play_game_hard()\n else:\n print('Invalid option!')\n else:\n print('Goodbye!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef introduction():\n like_to_play = int(input(\n 'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '\n ))\n if like_to_play == 1:\n easy_or_hard = input('Easy (1) or hard (2)? ')\n easy_or_hard = int(easy_or_hard)\n if easy_or_hard == 1:\n EasyMode.play_game_easy()\n elif easy_or_hard == 2:\n HardMode.play_game_hard()\n else:\n print('Invalid option!')\n else:\n print('Goodbye!')\n\n\nintroduction()\n",
"step-4": "import random\nimport HardMode\nimport EasyMode\n\n\ndef introduction():\n like_to_play = int(input(\n 'Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) '\n ))\n if like_to_play == 1:\n easy_or_hard = input('Easy (1) or hard (2)? ')\n easy_or_hard = int(easy_or_hard)\n if easy_or_hard == 1:\n EasyMode.play_game_easy()\n elif easy_or_hard == 2:\n HardMode.play_game_hard()\n else:\n print('Invalid option!')\n else:\n print('Goodbye!')\n\n\nintroduction()\n",
"step-5": "import random\nimport HardMode\nimport EasyMode\n\n#Intro function, gets user input of game start, instructions, and game mode\ndef introduction():\n like_to_play = int(input (\"Welcome to Rock Paper Scissors, would you like to play? (1 = yes, 2 = no) \"))\n #like_to_play = int(like_to_play)\n #need to set y/n variables instead of numeric: flow control\n \n if(like_to_play == 1):\n easy_or_hard = input(\"Easy (1) or hard (2)? \")\n easy_or_hard = int(easy_or_hard)\n\n if easy_or_hard == 1:\n EasyMode.play_game_easy()\n elif easy_or_hard == 2:\n HardMode.play_game_hard()\n else:\n print(\"Invalid option!\")\n\n else:\n print(\"Goodbye!\")\n\nintroduction()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sqlparse
f = open("parse.sql")
go = open("struct.go", "w+")
dictiony = {
"uuid": "string",
"varchar": "string",
"timestamp": "time.Time",
"int": "int",
"text": "string",
"dbname": "IndividualContrAgent",
"interface": "IndividualContrAgentI",
"ica":"ica"
}
#package
go.write("package main\n\n")
#import
go.write("import (\n ")
go.write('"github.com/jmoiron/sqlx"\n)\n\n')
#struct
go.write("type {0} struct {1}\n".format(dictiony["dbname"], "{"))
go.write(" ID {}\n".format(dictiony["uuid"]))
go.write(" Name {}\n".format(dictiony["varchar"]))
go.write(" PhoneNumber {}\n".format(dictiony["varchar"]))
go.write(" Address {}\n".format(dictiony["varchar"]))
go.write(" Description {}\n".format(dictiony["varchar"]))
go.write("}\n\n")
#db struct
go.write("type {0}Repo struct {1}\n".format(dictiony["dbname"], "{"))
go.write(" db *sqlx.DB\n}\n\n")
#interface
go.write("type {0}I interface {1}\n".format(dictiony["dbname"], "{"))
go.write(" Create(*{0}) (string, error)\n{1}\n\n".format(dictiony["dbname"], "}"))
#newIndCountrAgent
go.write("func New{0}(db *sqlx.DB) {1} {2}\n".format(dictiony["dbname"],dictiony["interface"], "{"))
go.write(" return &{0}Repo{1}\n".format(dictiony["dbname"], "{"))
go.write(" db: db,\n {0}\n{1}\n\n".format("}", "}"))
#create
go.write("func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\n".format(dictiony["dbname"], "{", dictiony["dbname"]))
go.write(" query := `INSERT INTO {} (\n".format(dictiony["dbname"]))
go.write(" id, \n name,\n phonenumber,\n address,\n")
go.write(" description)\n values($1, $2, $3, $4, $5);`\n")
go.write(" prp, err := ica.db.Prepare(query)\n\n ")
go.write(' if err != nil ')
go.write("{\n")
go.write(' return "", err\n')
go.write(" }\n")
go.write(" _, err = prp.Exec(\n")
go.write(" agent.ID,\n agent.Name,\n")
go.write(" agent.PhoneNumber,\n agent.Address,\n agent.Description,\n )\n")
go.write(" if err != nil {\n ")
go.write('return "", err\n }\n\n')
go.write(" return agent.ID, err\n}")
#get
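
For reference, replaying the write calls above (with the pointer fix in the Create signature) produces a struct.go along these lines; whitespace is lightly normalized, and the trailing #get marks a method the script never emits. Note that with the original value-typed Create(agent IndividualContrAgent), *IndividualContrAgentRepo would not have satisfied IndividualContrAgentI and the generated file would not compile.

package main

import (
    "github.com/jmoiron/sqlx"
)

type IndividualContrAgent struct {
    ID string
    Name string
    PhoneNumber string
    Address string
    Description string
}

type IndividualContrAgentRepo struct {
    db *sqlx.DB
}

type IndividualContrAgentI interface {
    Create(*IndividualContrAgent) (string, error)
}

func NewIndividualContrAgent(db *sqlx.DB) IndividualContrAgentI {
    return &IndividualContrAgentRepo{
        db: db,
    }
}

func (ica *IndividualContrAgentRepo) Create(agent *IndividualContrAgent) (string, error) {
    query := `INSERT INTO IndividualContrAgent (
    id,
    name,
    phonenumber,
    address,
    description)
    values($1, $2, $3, $4, $5);`
    prp, err := ica.db.Prepare(query)
    if err != nil {
        return "", err
    }
    _, err = prp.Exec(
        agent.ID,
        agent.Name,
        agent.PhoneNumber,
        agent.Address,
        agent.Description,
    )
    if err != nil {
        return "", err
    }
    return agent.ID, err
}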
|
normal
|
{
"blob_id": "e99e558ebf5938a90f00df6593c9f75a18affcb8",
"index": 9127,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngo.write('package main\\n\\n')\ngo.write('import (\\n ')\ngo.write(\"\"\"\"github.com/jmoiron/sqlx\"\n)\n\n\"\"\")\ngo.write('type {0} struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(' ID {}\\n'.format(dictiony['uuid']))\ngo.write(' Name {}\\n'.format(dictiony['varchar']))\ngo.write(' PhoneNumber {}\\n'.format(dictiony['varchar']))\ngo.write(' Address {}\\n'.format(dictiony['varchar']))\ngo.write(' Description {}\\n'.format(dictiony['varchar']))\ngo.write('}\\n\\n')\ngo.write('type {0}Repo struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db *sqlx.DB\n}\n\n\"\"\")\ngo.write('type {0}I interface {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" Create(*{0}) (string, error)\n{1}\n\n\"\"\".format(dictiony[\n 'dbname'], '}'))\ngo.write('func New{0}(db *sqlx.DB) {1} {2}\\n'.format(dictiony['dbname'],\n dictiony['interface'], '{'))\ngo.write(' return &{0}Repo{1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db: db,\n {0}\n{1}\n\n\"\"\".format('}', '}'))\ngo.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\\n'.\n format(dictiony['dbname'], '{', dictiony['dbname']))\ngo.write(' query := `INSERT INTO {} (\\n'.format(dictiony['dbname']))\ngo.write(\n \"\"\" id, \n name,\n phonenumber,\n address,\n\"\"\")\ngo.write(\"\"\" description)\n values($1, $2, $3, $4, $5);`\n\"\"\")\ngo.write(' prp, err := ica.db.Prepare(query)\\n\\n ')\ngo.write(' if err != nil ')\ngo.write('{\\n')\ngo.write(' return \"\", err\\n')\ngo.write(' }\\n')\ngo.write(' _, err = prp.Exec(\\n')\ngo.write(\"\"\" agent.ID,\n agent.Name,\n\"\"\")\ngo.write(\n \"\"\" agent.PhoneNumber,\n agent.Address,\n agent.Description,\n )\n\"\"\"\n )\ngo.write(' if err != nil {\\n ')\ngo.write(\"\"\"return \"\", err\n }\n\n\"\"\")\ngo.write(\"\"\" return agent.ID, err\n}\"\"\")\n",
"step-3": "<mask token>\nf = open('parse.sql')\ngo = open('struct.go', 'w+')\ndictiony = {'uuid': 'string', 'varchar': 'string', 'timestamp': 'time.Time',\n 'int': 'int', 'text': 'string', 'dbname': 'IndividualContrAgent',\n 'interface': 'IndividualContrAgentI', 'ica': 'ica'}\ngo.write('package main\\n\\n')\ngo.write('import (\\n ')\ngo.write(\"\"\"\"github.com/jmoiron/sqlx\"\n)\n\n\"\"\")\ngo.write('type {0} struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(' ID {}\\n'.format(dictiony['uuid']))\ngo.write(' Name {}\\n'.format(dictiony['varchar']))\ngo.write(' PhoneNumber {}\\n'.format(dictiony['varchar']))\ngo.write(' Address {}\\n'.format(dictiony['varchar']))\ngo.write(' Description {}\\n'.format(dictiony['varchar']))\ngo.write('}\\n\\n')\ngo.write('type {0}Repo struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db *sqlx.DB\n}\n\n\"\"\")\ngo.write('type {0}I interface {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" Create(*{0}) (string, error)\n{1}\n\n\"\"\".format(dictiony[\n 'dbname'], '}'))\ngo.write('func New{0}(db *sqlx.DB) {1} {2}\\n'.format(dictiony['dbname'],\n dictiony['interface'], '{'))\ngo.write(' return &{0}Repo{1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db: db,\n {0}\n{1}\n\n\"\"\".format('}', '}'))\ngo.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\\n'.\n format(dictiony['dbname'], '{', dictiony['dbname']))\ngo.write(' query := `INSERT INTO {} (\\n'.format(dictiony['dbname']))\ngo.write(\n \"\"\" id, \n name,\n phonenumber,\n address,\n\"\"\")\ngo.write(\"\"\" description)\n values($1, $2, $3, $4, $5);`\n\"\"\")\ngo.write(' prp, err := ica.db.Prepare(query)\\n\\n ')\ngo.write(' if err != nil ')\ngo.write('{\\n')\ngo.write(' return \"\", err\\n')\ngo.write(' }\\n')\ngo.write(' _, err = prp.Exec(\\n')\ngo.write(\"\"\" agent.ID,\n agent.Name,\n\"\"\")\ngo.write(\n \"\"\" agent.PhoneNumber,\n agent.Address,\n agent.Description,\n )\n\"\"\"\n )\ngo.write(' if err != nil {\\n ')\ngo.write(\"\"\"return \"\", err\n }\n\n\"\"\")\ngo.write(\"\"\" return agent.ID, err\n}\"\"\")\n",
"step-4": "import sqlparse\nf = open('parse.sql')\ngo = open('struct.go', 'w+')\ndictiony = {'uuid': 'string', 'varchar': 'string', 'timestamp': 'time.Time',\n 'int': 'int', 'text': 'string', 'dbname': 'IndividualContrAgent',\n 'interface': 'IndividualContrAgentI', 'ica': 'ica'}\ngo.write('package main\\n\\n')\ngo.write('import (\\n ')\ngo.write(\"\"\"\"github.com/jmoiron/sqlx\"\n)\n\n\"\"\")\ngo.write('type {0} struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(' ID {}\\n'.format(dictiony['uuid']))\ngo.write(' Name {}\\n'.format(dictiony['varchar']))\ngo.write(' PhoneNumber {}\\n'.format(dictiony['varchar']))\ngo.write(' Address {}\\n'.format(dictiony['varchar']))\ngo.write(' Description {}\\n'.format(dictiony['varchar']))\ngo.write('}\\n\\n')\ngo.write('type {0}Repo struct {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db *sqlx.DB\n}\n\n\"\"\")\ngo.write('type {0}I interface {1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" Create(*{0}) (string, error)\n{1}\n\n\"\"\".format(dictiony[\n 'dbname'], '}'))\ngo.write('func New{0}(db *sqlx.DB) {1} {2}\\n'.format(dictiony['dbname'],\n dictiony['interface'], '{'))\ngo.write(' return &{0}Repo{1}\\n'.format(dictiony['dbname'], '{'))\ngo.write(\"\"\" db: db,\n {0}\n{1}\n\n\"\"\".format('}', '}'))\ngo.write('func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\\n'.\n format(dictiony['dbname'], '{', dictiony['dbname']))\ngo.write(' query := `INSERT INTO {} (\\n'.format(dictiony['dbname']))\ngo.write(\n \"\"\" id, \n name,\n phonenumber,\n address,\n\"\"\")\ngo.write(\"\"\" description)\n values($1, $2, $3, $4, $5);`\n\"\"\")\ngo.write(' prp, err := ica.db.Prepare(query)\\n\\n ')\ngo.write(' if err != nil ')\ngo.write('{\\n')\ngo.write(' return \"\", err\\n')\ngo.write(' }\\n')\ngo.write(' _, err = prp.Exec(\\n')\ngo.write(\"\"\" agent.ID,\n agent.Name,\n\"\"\")\ngo.write(\n \"\"\" agent.PhoneNumber,\n agent.Address,\n agent.Description,\n )\n\"\"\"\n )\ngo.write(' if err != nil {\\n ')\ngo.write(\"\"\"return \"\", err\n }\n\n\"\"\")\ngo.write(\"\"\" return agent.ID, err\n}\"\"\")\n",
"step-5": "import sqlparse\n\nf = open(\"parse.sql\")\ngo = open(\"struct.go\", \"w+\")\ndictiony = {\n \"uuid\": \"string\",\n \"varchar\": \"string\",\n \"timestamp\": \"time.Time\",\n \"int\": \"int\",\n \"text\": \"string\",\n \"dbname\": \"IndividualContrAgent\",\n \"interface\": \"IndividualContrAgentI\",\n \"ica\":\"ica\"\n}\n#package\ngo.write(\"package main\\n\\n\")\n\n#import\ngo.write(\"import (\\n \")\ngo.write('\"github.com/jmoiron/sqlx\"\\n)\\n\\n')\n\n#struct\ngo.write(\"type {0} struct {1}\\n\".format(dictiony[\"dbname\"], \"{\"))\ngo.write(\" ID {}\\n\".format(dictiony[\"uuid\"]))\ngo.write(\" Name {}\\n\".format(dictiony[\"varchar\"]))\ngo.write(\" PhoneNumber {}\\n\".format(dictiony[\"varchar\"]))\ngo.write(\" Address {}\\n\".format(dictiony[\"varchar\"]))\ngo.write(\" Description {}\\n\".format(dictiony[\"varchar\"]))\ngo.write(\"}\\n\\n\")\n\n#db struct\ngo.write(\"type {0}Repo struct {1}\\n\".format(dictiony[\"dbname\"], \"{\"))\ngo.write(\" db *sqlx.DB\\n}\\n\\n\")\n\n#interface\ngo.write(\"type {0}I interface {1}\\n\".format(dictiony[\"dbname\"], \"{\"))\ngo.write(\" Create(*{0}) (string, error)\\n{1}\\n\\n\".format(dictiony[\"dbname\"], \"}\"))\n\n#newIndCountrAgent\ngo.write(\"func New{0}(db *sqlx.DB) {1} {2}\\n\".format(dictiony[\"dbname\"],dictiony[\"interface\"], \"{\"))\ngo.write(\" return &{0}Repo{1}\\n\".format(dictiony[\"dbname\"], \"{\"))\ngo.write(\" db: db,\\n {0}\\n{1}\\n\\n\".format(\"}\", \"}\"))\n\n#create\ngo.write(\"func(ica *{2}Repo) Create(agent {0}) (string, error) {1}\\n\".format(dictiony[\"dbname\"], \"{\", dictiony[\"dbname\"]))\ngo.write(\" query := `INSERT INTO {} (\\n\".format(dictiony[\"dbname\"]))\ngo.write(\" id, \\n name,\\n phonenumber,\\n address,\\n\")\ngo.write(\" description)\\n values($1, $2, $3, $4, $5);`\\n\")\ngo.write(\" prp, err := ica.db.Prepare(query)\\n\\n \")\ngo.write(' if err != nil ')\ngo.write(\"{\\n\")\ngo.write(' return \"\", err\\n')\ngo.write(\" }\\n\")\ngo.write(\" _, err = prp.Exec(\\n\")\ngo.write(\" agent.ID,\\n agent.Name,\\n\")\ngo.write(\" agent.PhoneNumber,\\n agent.Address,\\n agent.Description,\\n )\\n\")\ngo.write(\" if err != nil {\\n \")\ngo.write('return \"\", err\\n }\\n\\n')\ngo.write(\" return agent.ID, err\\n}\")\n\n#get\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Planet:
def __init__(self, x, y, radius):
self.radius = radius
self.x = x
self.y = y
canvas = Screen()
canvas.setup(800, 800)
self.turtle = Turtle()
<|reserved_special_token_0|>
def scaleSize(self, scale):
self.radius = self.radius * scale
def draw(self, colour):
self.turtle.goto(self.x, self.y)
self.turtle.color(colour)
self.turtle.dot(self.radius)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Planet:
def __init__(self, x, y, radius):
self.radius = radius
self.x = x
self.y = y
canvas = Screen()
canvas.setup(800, 800)
self.turtle = Turtle()
def circumference(self):
return 2 * 3.1415 * self.radius
def scaleSize(self, scale):
self.radius = self.radius * scale
def draw(self, colour):
self.turtle.goto(self.x, self.y)
self.turtle.color(colour)
self.turtle.dot(self.radius)
<|reserved_special_token_0|>
planet1.draw('red')
print('Circumference *check the maths!* is:', planet1.circumference())
planet1.scaleSize(0.5)
planet1.draw('yellow')
<|reserved_special_token_0|>
planet2.draw('black')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Planet:
def __init__(self, x, y, radius):
self.radius = radius
self.x = x
self.y = y
canvas = Screen()
canvas.setup(800, 800)
self.turtle = Turtle()
def circumference(self):
return 2 * 3.1415 * self.radius
def scaleSize(self, scale):
self.radius = self.radius * scale
def draw(self, colour):
self.turtle.goto(self.x, self.y)
self.turtle.color(colour)
self.turtle.dot(self.radius)
planet1 = Planet(-200, -100, 200)
planet1.draw('red')
print('Circumference *check the maths!* is:', planet1.circumference())
planet1.scaleSize(0.5)
planet1.draw('yellow')
planet2 = Planet(300, 200, 100)
planet2.draw('black')
<|reserved_special_token_1|>
from turtle import *
class Planet:
def __init__(self, x, y, radius):
self.radius = radius
self.x = x
self.y = y
canvas = Screen()
canvas.setup(800, 800)
self.turtle = Turtle()
def circumference(self):
return 2 * 3.1415 * self.radius
def scaleSize(self, scale):
self.radius = self.radius * scale
def draw(self, colour):
self.turtle.goto(self.x, self.y)
self.turtle.color(colour)
self.turtle.dot(self.radius)
planet1 = Planet(-200, -100, 200)
planet1.draw('red')
print('Circumference *check the maths!* is:', planet1.circumference())
planet1.scaleSize(0.5)
planet1.draw('yellow')
planet2 = Planet(300, 200, 100)
planet2.draw('black')
<|reserved_special_token_1|>
# Planet Class
from turtle import *
class Planet:
def __init__(self, x, y, radius):
self.radius = radius
self.x = x
self.y = y
canvas = Screen()
canvas.setup(800, 800)
self.turtle = Turtle()
def circumference(self):
return 2*3.1415*self.radius
def scaleSize(self, scale):
self.radius = self.radius*scale
def draw(self, colour):
self.turtle.goto(self.x, self.y)
self.turtle.color(colour)
self.turtle.dot(self.radius)
#====instance of the class===
planet1 = Planet(-200, -100, 200)
planet1.draw('red')
print('Circumference *check the maths!* is:', planet1.circumference())
planet1.scaleSize(0.5)
planet1.draw('yellow')
planet2 = Planet(300, 200, 100)
planet2.draw('black')
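
The "check the maths!" hint in the print above is easy to verify: circumference() uses the hard-coded constant 3.1415, so for planet1 (radius 200) it returns 2 * 3.1415 * 200 = 1256.6 (up to float rounding), whereas math.pi would give about 1256.637. A one-line check, illustrative and not part of the record:

import math
print(2 * 3.1415 * 200, 2 * math.pi * 200)  # ~1256.6 vs ~1256.637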
|
flexible
|
{
"blob_id": "668b63d1f1bd035226e3e12bc6816abc897affc3",
"index": 9975,
"step-1": "<mask token>\n\n\nclass Planet:\n\n def __init__(self, x, y, radius):\n self.radius = radius\n self.x = x\n self.y = y\n canvas = Screen()\n canvas.setup(800, 800)\n self.turtle = Turtle()\n <mask token>\n\n def scaleSize(self, scale):\n self.radius = self.radius * scale\n\n def draw(self, colour):\n self.turtle.goto(self.x, self.y)\n self.turtle.color(colour)\n self.turtle.dot(self.radius)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Planet:\n\n def __init__(self, x, y, radius):\n self.radius = radius\n self.x = x\n self.y = y\n canvas = Screen()\n canvas.setup(800, 800)\n self.turtle = Turtle()\n\n def circumference(self):\n return 2 * 3.1415 * self.radius\n\n def scaleSize(self, scale):\n self.radius = self.radius * scale\n\n def draw(self, colour):\n self.turtle.goto(self.x, self.y)\n self.turtle.color(colour)\n self.turtle.dot(self.radius)\n\n\n<mask token>\nplanet1.draw('red')\nprint('Circumference *check the maths!* is:', planet1.circumference())\nplanet1.scaleSize(0.5)\nplanet1.draw('yellow')\n<mask token>\nplanet2.draw('black')\n",
"step-3": "<mask token>\n\n\nclass Planet:\n\n def __init__(self, x, y, radius):\n self.radius = radius\n self.x = x\n self.y = y\n canvas = Screen()\n canvas.setup(800, 800)\n self.turtle = Turtle()\n\n def circumference(self):\n return 2 * 3.1415 * self.radius\n\n def scaleSize(self, scale):\n self.radius = self.radius * scale\n\n def draw(self, colour):\n self.turtle.goto(self.x, self.y)\n self.turtle.color(colour)\n self.turtle.dot(self.radius)\n\n\nplanet1 = Planet(-200, -100, 200)\nplanet1.draw('red')\nprint('Circumference *check the maths!* is:', planet1.circumference())\nplanet1.scaleSize(0.5)\nplanet1.draw('yellow')\nplanet2 = Planet(300, 200, 100)\nplanet2.draw('black')\n",
"step-4": "from turtle import *\n\n\nclass Planet:\n\n def __init__(self, x, y, radius):\n self.radius = radius\n self.x = x\n self.y = y\n canvas = Screen()\n canvas.setup(800, 800)\n self.turtle = Turtle()\n\n def circumference(self):\n return 2 * 3.1415 * self.radius\n\n def scaleSize(self, scale):\n self.radius = self.radius * scale\n\n def draw(self, colour):\n self.turtle.goto(self.x, self.y)\n self.turtle.color(colour)\n self.turtle.dot(self.radius)\n\n\nplanet1 = Planet(-200, -100, 200)\nplanet1.draw('red')\nprint('Circumference *check the maths!* is:', planet1.circumference())\nplanet1.scaleSize(0.5)\nplanet1.draw('yellow')\nplanet2 = Planet(300, 200, 100)\nplanet2.draw('black')\n",
"step-5": "# Planet Class\r\nfrom turtle import *\r\nclass Planet:\r\n def __init__(self, x, y, radius):\r\n self.radius = radius\r\n self.x = x\r\n self.y = y\r\n canvas = Screen()\r\n canvas.setup(800, 800)\r\n self.turtle = Turtle()\r\n\r\n def circumference(self):\r\n return 2*3.1415*self.radius\r\n\r\n def scaleSize(self, scale):\r\n self.radius = self.radius*scale\r\n\r\n def draw(self, colour):\r\n self.turtle.goto(self.x, self.y)\r\n self.turtle.color(colour)\r\n self.turtle.dot(self.radius)\r\n\r\n\r\n\r\n#====instance of the class===\r\nplanet1 = Planet(-200, -100, 200)\r\nplanet1.draw('red')\r\nprint('Circumference *check the maths!* is:', planet1.circumference())\r\nplanet1.scaleSize(0.5)\r\nplanet1.draw('yellow')\r\nplanet2 = Planet(300, 200, 100)\r\nplanet2.draw('black')\r\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
from slistener import SListener
from slistener import track
import datetime
import time, tweepy, sys
import json
import re
#def tweet_collector():
consumer_key='qpUR91PwjvChszV0VFgrc4Hje'
consumer_secret='q9mPUZE2OsFbaqKUF32ZsY1ry4anZ1k8pNSne56wc3HInmERFu'
access_token='2845943577-R0g6YRlrdEqSFb2mKy5HXuByQPdpq4TLGrPkmSs'
access_token_secret='ed5emUSxHENLtqN8nLYvGkbipKAEemFd0fgjsXNPC8GED'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
listen = SListener(api)
stream = tweepy.Stream(auth, listen)
print "Streaming started..."
global track
try:
stream.filter(track = track)
except:
stream.disconnect()
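
The slistener module imported above is not part of this record. Here is a minimal sketch of an SListener compatible with these calls, assuming the pre-4.0 tweepy StreamListener API implied by tweepy.Stream(auth, listen); the names SListener and track come from the imports, everything else (including the keyword list) is an assumption:

# slistener.py (hypothetical sketch for tweepy < 4.0)
import json
import tweepy

track = ['python']  # keywords for stream.filter(track=track); placeholder value

class SListener(tweepy.StreamListener):
    def __init__(self, api=None):
        super(SListener, self).__init__(api)

    def on_data(self, raw_data):
        tweet = json.loads(raw_data)
        print(tweet.get('text', ''))
        return True  # keep the stream alive

    def on_error(self, status_code):
        return False  # disconnect on errors such as 420 rate limiting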
|
normal
|
{
"blob_id": "606e40dd073c3efc95ef01a08466fd536a28f140",
"index": 324,
"step-1": "from slistener import SListener\nfrom slistener import track\nimport datetime\nimport time, tweepy, sys\nimport json\nimport re\n\n#def tweet_collector():\nconsumer_key='qpUR91PwjvChszV0VFgrc4Hje'\nconsumer_secret='q9mPUZE2OsFbaqKUF32ZsY1ry4anZ1k8pNSne56wc3HInmERFu'\naccess_token='2845943577-R0g6YRlrdEqSFb2mKy5HXuByQPdpq4TLGrPkmSs'\naccess_token_secret='ed5emUSxHENLtqN8nLYvGkbipKAEemFd0fgjsXNPC8GED'\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth) \n\nlisten = SListener(api)\nstream = tweepy.Stream(auth, listen)\nprint \"Streaming started...\"\nglobal track \ntry:\n stream.filter(track = track)\nexcept:\n stream.disconnect()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
<|reserved_special_token_0|>
sys.path.append(path_to_utils)
<|reserved_special_token_0|>
print('using the last results')
<|reserved_special_token_0|>
print('record', record)
<|reserved_special_token_0|>
if 'datasets/sop/' in PROBLEM_FILE:
print('showing SOP')
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = 'solution_sop.png'
elif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:
print('showing DOP')
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = 'solution_dop.png'
elif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:
print('showing OPN')
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = 'solution_opn.png'
else:
    print('ERROR: can not decide problem type based on problem file location')
problem_type = ProblemType.UNKNOWN
<|reserved_special_token_0|>
op.load_problem_file(PROBLEM_FILE)
<|reserved_special_token_0|>
print('problem loaded')
print('result_target_ids:', result_target_ids)
print('result_cluster_ids:', result_cluster_ids)
print('result_rewards', result_rewards)
print('sets_prices', sets_prices)
print('sets', sets)
print('nodes', nodes)
<|reserved_special_token_0|>
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
head_ang = (math.pi + 2 * math.pi * node_inside_cluster /
sampling_heading)
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print('what the hell, it is not good')
print('calc_reward', calc_reward)
<|reserved_special_token_0|>
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
<|reserved_special_token_0|>
print(figsize)
<|reserved_special_token_0|>
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + 2 * math.pi * hind / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang),
arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if nidx1 != nidx2:
node2 = nodes_w_rewards[nidx2, :]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if nidx1 != nidx2:
node2 = nodes_w_rewards[nidx2, :]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],
result_head_angs[node_idx]]
q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][
1], result_head_angs[node_idx - 1]]
path = dubins.shortest_path(q_start, q_end, turning_radius)
qs, _ = path.sample_many(step_size)
xses = [item[0] for item in qs]
yses = [item[1] for item in qs]
print(node_prew, '->', node, ',', q_start, '->', q_end)
plt.plot(xses, yses, '-g', lw=1.6)
elif problem_type == ProblemType.OPN:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],
node_pos[1]], '-g', lw=1.6)
else:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],
node_pos[1]], '-g', lw=1.6)
<|reserved_special_token_0|>
ax.axis('equal')
figure_utils.no_axis(ax)
<|reserved_special_token_0|>
cb.ax.tick_params(labelsize=tick_font_size)
cb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)
fig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)
plt.savefig(SAVE_TO_FIGURE, dpi=300)
if SHOW_FIGURE:
plt.show()
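
The DOP branch above relies on the dubins package (pydubins) for the shortest curvature-constrained path between two (x, y, heading) configurations. A standalone sketch of the two calls it uses, dubins.shortest_path and sample_many; the configurations and radius here are made-up values:

import math
import dubins

q_start = (0.0, 0.0, 0.0)            # x, y, heading [rad]
q_end = (100.0, 50.0, math.pi / 2)
turning_radius = 40.0

path = dubins.shortest_path(q_start, q_end, turning_radius)
configurations, _ = path.sample_many(5.0)  # one sample every 5 length units
print(len(configurations), 'samples, path length:', path.path_length())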
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
<|reserved_special_token_0|>
this_script_path = os.path.dirname(__file__)
path_to_utils = os.path.join(this_script_path, 'utils')
sys.path.append(path_to_utils)
<|reserved_special_token_0|>
legend_font_size = 24
tick_font_size = 20
NUM_POINTS_TO_GEN = 16
SCATTER_SIZE = 80
FIG_HEIGHT = 7.5
SHOW_FIGURE = True
RESULT_FILE = '../sources/results/results.log'
RESULT_FILE = os.path.join(this_script_path, RESULT_FILE)
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print('using the last results')
record = data_vns_sop[-1]
print('record', record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if 'datasets/sop/' in PROBLEM_FILE:
print('showing SOP')
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = 'solution_sop.png'
elif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:
print('showing DOP')
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = 'solution_dop.png'
elif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:
print('showing OPN')
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = 'solution_opn.png'
else:
    print('ERROR: can not decide problem type based on problem file location')
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print('problem loaded')
print('result_target_ids:', result_target_ids)
print('result_cluster_ids:', result_cluster_ids)
print('result_rewards', result_rewards)
print('sets_prices', sets_prices)
print('sets', sets)
print('nodes', nodes)
result_head_angs = []
sampling_heading = len(sets[0])
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
head_ang = (math.pi + 2 * math.pi * node_inside_cluster /
sampling_heading)
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print('what the hell, it is not good')
print('calc_reward', calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max, -sys.float_info.max
minx, miny = sys.float_info.max, sys.float_info.max
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew)
)
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT * (maxx - minx) / (maxy - miny)
figsize = fig_width * 0.9, FIG_HEIGHT
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w',
edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1],
circle_radiuses1, c=nodes_w_rewards[:, 2], alpha=0.05, edgecolor=
'black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=
nodes_w_rewards[:, 2], cmap=mycmap, alpha=1.0, s=1, facecolor='black',
lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + 2 * math.pi * hind / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang),
arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if nidx1 != nidx2:
node2 = nodes_w_rewards[nidx2, :]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if nidx1 != nidx2:
node2 = nodes_w_rewards[nidx2, :]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],
result_head_angs[node_idx]]
        q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1],
            result_head_angs[node_idx - 1]]
path = dubins.shortest_path(q_start, q_end, turning_radius)
qs, _ = path.sample_many(step_size)
xses = [item[0] for item in qs]
yses = [item[1] for item in qs]
print(node_prew, '->', node, ',', q_start, '->', q_end)
plt.plot(xses, yses, '-g', lw=1.6)
elif problem_type == ProblemType.OPN:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],
node_pos[1]], '-g', lw=1.6)
else:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],
node_pos[1]], '-g', lw=1.6)
ax = plt.gca()
ax.axis('equal')
figure_utils.no_axis(ax)
cbar_position = [0.2, 0.05, 0.6, 0.03]
cbar_ax = fig.add_axes(cbar_position)
cb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')
cb.ax.tick_params(labelsize=tick_font_size)
cb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)
fig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)
plt.savefig(SAVE_TO_FIGURE, dpi=300)
if SHOW_FIGURE:
plt.show()
<|reserved_special_token_1|>
import sys, os
import random
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import shapely.geometry as geometry
from shapely.ops import cascaded_union, polygonize
import math
from matplotlib.pyplot import arrow
import dubins
this_script_path = os.path.dirname(__file__)
path_to_utils = os.path.join(this_script_path, 'utils')
sys.path.append(path_to_utils)
import figure_utils
import orienteering_utils
from orienteering_utils import ProblemType
legend_font_size = 24
tick_font_size = 20
NUM_POINTS_TO_GEN = 16
SCATTER_SIZE = 80
FIG_HEIGHT = 7.5
SHOW_FIGURE = True
RESULT_FILE = '../sources/results/results.log'
RESULT_FILE = os.path.join(this_script_path, RESULT_FILE)
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print('using the last results')
record = data_vns_sop[-1]
print('record', record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if 'datasets/sop/' in PROBLEM_FILE:
print('showing SOP')
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = 'solution_sop.png'
elif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:
print('showing DOP')
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = 'solution_dop.png'
elif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:
print('showing OPN')
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = 'solution_opn.png'
else:
    sys.exit('can not decide problem type based on problem file location')
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print('problem loaded')
print('result_target_ids:', result_target_ids)
print('result_cluster_ids:', result_cluster_ids)
print('result_rewards', result_rewards)
print('sets_prices', sets_prices)
print('sets', sets)
print('nodes', nodes)
result_head_angs = []
sampling_heading = len(sets[0])
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
head_ang = (math.pi + 2 * math.pi * node_inside_cluster /
sampling_heading)
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print('what the hell, it is not good')
print('calc_reward', calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max, -sys.float_info.max
minx, miny = sys.float_info.max, sys.float_info.max
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew))
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT * (maxx - minx) / (maxy - miny)
figsize = fig_width * 0.9, FIG_HEIGHT
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w',
edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1],
    circle_radiuses1, c=nodes_w_rewards[:, 2], alpha=0.05,
    edgecolor='black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1],
    c=nodes_w_rewards[:, 2], cmap=mycmap, alpha=1.0, s=1,
    facecolor='black', lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + 2 * math.pi * hind / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang),
arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if nidx1 != nidx2:
node2 = nodes_w_rewards[nidx2, :]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if nidx1 != nidx2:
node2 = nodes_w_rewards[nidx2, :]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],
result_head_angs[node_idx]]
        q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1],
            result_head_angs[node_idx - 1]]
path = dubins.shortest_path(q_start, q_end, turning_radius)
qs, _ = path.sample_many(step_size)
xses = [item[0] for item in qs]
yses = [item[1] for item in qs]
print(node_prew, '->', node, ',', q_start, '->', q_end)
plt.plot(xses, yses, '-g', lw=1.6)
elif problem_type == ProblemType.OPN:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],
node_pos[1]], '-g', lw=1.6)
else:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],
node_pos[1]], '-g', lw=1.6)
ax = plt.gca()
ax.axis('equal')
figure_utils.no_axis(ax)
cbar_position = [0.2, 0.05, 0.6, 0.03]
cbar_ax = fig.add_axes(cbar_position)
cb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')
cb.ax.tick_params(labelsize=tick_font_size)
cb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)
fig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)
plt.savefig(SAVE_TO_FIGURE, dpi=300)
if SHOW_FIGURE:
plt.show()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sys, os
import random
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import shapely.geometry as geometry
from shapely.ops import cascaded_union, polygonize
import math
from matplotlib.pyplot import arrow
import dubins
this_script_path = os.path.dirname(__file__)
path_to_utils = os.path.join(this_script_path, "utils")
sys.path.append(path_to_utils)
import figure_utils
import orienteering_utils
from orienteering_utils import ProblemType
legend_font_size = 24
tick_font_size = 20
NUM_POINTS_TO_GEN = 16
SCATTER_SIZE = 80
FIG_HEIGHT = 7.5
SHOW_FIGURE = True
RESULT_FILE = "../sources/results/results.log"
RESULT_FILE = os.path.join(this_script_path, RESULT_FILE)
#use nice latex fonts if latex is installed
#figure_utils.configure_latex_fonts_latex()
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print("using the last results")
record = data_vns_sop[-1]
print("record", record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if "datasets/sop/" in PROBLEM_FILE:
print("showing SOP")
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = "solution_sop.png"
elif "datasets/dop_sop_dataset/" in PROBLEM_FILE:
print("showing DOP")
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = "solution_dop.png"
elif "datasets/opn_sop_dataset/" in PROBLEM_FILE:
print("showing OPN")
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = "solution_opn.png"
else:
error("can not decide problem type based on problem file location")
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print("problem loaded")
print("result_target_ids:", result_target_ids)
print("result_cluster_ids:", result_cluster_ids)
print("result_rewards", result_rewards)
print("sets_prices", sets_prices)
print("sets", sets)
print("nodes", nodes)
# for the DOP only
result_head_angs = []
sampling_heading = len(sets[0])
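# the number of discretized headings per DOP target is inferred from the size
# of the first set: each node of a cluster corresponds to one heading sample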
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
# result_node_inside_cluster.append(node_inside_cluster)
head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print("what the hell, it is not good")
print("calc_reward", calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max,-sys.float_info.max
minx, miny = sys.float_info.max,sys.float_info.max
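# start from inverted extremes so the first coordinate seen always updates the bounds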
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN :
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew))
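# vmax is padded by 10% of the reward range so the top reward does not map to
# the very edge of the colormap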
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)
figsize = (fig_width*0.9,FIG_HEIGHT)
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
# print(nodes_w_rewards[:, 2])
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + (2 * math.pi * hind) / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result_head_angs[node_idx]]
q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1], result_head_angs[node_idx - 1]]
path = dubins.shortest_path(q_start, q_end, turning_radius)
qs, _ = path.sample_many(step_size)
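        # sample_many() yields the (x, y, heading) configurations spaced every
        # step_size units along the Dubins path; the distances it also returns
        # are unused here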
# length_dub += math.ceil(path.path_length())
xses = [item[0] for item in qs]
yses = [item[1] for item in qs]
print(node_prew, '->', node, ",", q_start, '->', q_end)
plt.plot(xses, yses, '-g', lw=1.6)
elif problem_type == ProblemType.OPN:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
else:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
ax = plt.gca()
ax.axis('equal')
figure_utils.no_axis(ax)
cbar_position = [0.20, 0.05, 0.6, 0.03]
cbar_ax = fig.add_axes(cbar_position)
cb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')
cb.ax.tick_params(labelsize=tick_font_size)
cb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)
# offset = 0.08
fig.subplots_adjust(left=-0.035, right=1.035 , top=1.07 , bottom=0.0)
plt.savefig(SAVE_TO_FIGURE, dpi=300)
if SHOW_FIGURE:
plt.show()
|
flexible
|
{
"blob_id": "b4454d92ab8380e0eded2f7aed737378e1710c72",
"index": 9413,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\n<mask token>\nsys.path.append(path_to_utils)\n<mask token>\nprint('using the last results')\n<mask token>\nprint('record', record)\n<mask token>\nif 'datasets/sop/' in PROBLEM_FILE:\n print('showing SOP')\n problem_type = ProblemType.SOP\n SAVE_TO_FIGURE = 'solution_sop.png'\nelif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:\n print('showing DOP')\n problem_type = ProblemType.DOP\n SAVE_TO_FIGURE = 'solution_dop.png'\nelif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:\n print('showing OPN')\n problem_type = ProblemType.OPN\n SAVE_TO_FIGURE = 'solution_opn.png'\nelse:\n error('can not decide problem type based on problem file location')\n problem_type = ProblemType.UNKNOWN\n<mask token>\nop.load_problem_file(PROBLEM_FILE)\n<mask token>\nprint('problem loaded')\nprint('result_target_ids:', result_target_ids)\nprint('result_cluster_ids:', result_cluster_ids)\nprint('result_rewards', result_rewards)\nprint('sets_prices', sets_prices)\nprint('sets', sets)\nprint('nodes', nodes)\n<mask token>\nfor clust_idx in range(len(result_cluster_ids)):\n clust = result_cluster_ids[clust_idx]\n node = result_target_ids[clust_idx]\n if problem_type == ProblemType.DOP:\n node_inside_cluster = node - sets[clust][0]\n head_ang = (math.pi + 2 * math.pi * node_inside_cluster /\n sampling_heading)\n result_head_angs.append(head_ang)\n calc_reward += sets_prices[clust]\n if node not in sets[clust]:\n print('what the hell, it is not good')\nprint('calc_reward', calc_reward)\n<mask token>\nif problem_type == ProblemType.DOP:\n xses = [i[0] for i in original_nodes]\n yses = [i[1] for i in original_nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(original_nodes), 3))\n for nidx in range(len(original_nodes)):\n nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]\n nodes_w_rewards[nidx, 2] = sets_prices[nidx]\nelif problem_type == ProblemType.OPN:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nelse:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\n<mask token>\nprint(figsize)\n<mask token>\nplt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)\nif problem_type == ProblemType.DOP:\n for nidx1 in range(len(nodes_w_rewards)):\n points = []\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for hind in range(sampling_heading):\n head_ang = math.pi + 2 * math.pi * hind / sampling_heading\n arrow_len = 30\n arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), \n arrow_len * math.sin(head_ang))\n set_rew = nodes_w_rewards[nidx1, 2]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, 
alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)\nelif problem_type == ProblemType.OPN:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nelse:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nfor node_idx in range(1, len(result_target_ids)):\n if problem_type == ProblemType.DOP:\n step_size = 20\n turning_radius = op.dubins_radius\n node = result_cluster_ids[node_idx]\n node_prew = result_cluster_ids[node_idx - 1]\n q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],\n result_head_angs[node_idx]]\n q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][\n 1], result_head_angs[node_idx - 1]]\n path = dubins.shortest_path(q_start, q_end, turning_radius)\n qs, _ = path.sample_many(step_size)\n xses = [item[0] for item in qs]\n yses = [item[1] for item in qs]\n print(node_prew, '->', node, ',', q_start, '->', q_end)\n plt.plot(xses, yses, '-g', lw=1.6)\n elif problem_type == ProblemType.OPN:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\n else:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\n<mask token>\nax.axis('equal')\nfigure_utils.no_axis(ax)\n<mask token>\ncb.ax.tick_params(labelsize=tick_font_size)\ncb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)\nfig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)\nplt.savefig(SAVE_TO_FIGURE, dpi=300)\nif SHOW_FIGURE:\n plt.show()\n",
"step-3": "<mask token>\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\n<mask token>\nthis_script_path = os.path.dirname(__file__)\npath_to_utils = os.path.join(this_script_path, 'utils')\nsys.path.append(path_to_utils)\n<mask token>\nlegend_font_size = 24\ntick_font_size = 20\nNUM_POINTS_TO_GEN = 16\nSCATTER_SIZE = 80\nFIG_HEIGHT = 7.5\nSHOW_FIGURE = True\nRESULT_FILE = '../sources/results/results.log'\nRESULT_FILE = os.path.join(this_script_path, RESULT_FILE)\ndata_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)\nprint('using the last results')\nrecord = data_vns_sop[-1]\nprint('record', record)\nproblem_type = ProblemType.UNKNOWN\nPROBLEM_FILE = record['PROBLEM_FILE']\nPROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)\nif 'datasets/sop/' in PROBLEM_FILE:\n print('showing SOP')\n problem_type = ProblemType.SOP\n SAVE_TO_FIGURE = 'solution_sop.png'\nelif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:\n print('showing DOP')\n problem_type = ProblemType.DOP\n SAVE_TO_FIGURE = 'solution_dop.png'\nelif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:\n print('showing OPN')\n problem_type = ProblemType.OPN\n SAVE_TO_FIGURE = 'solution_opn.png'\nelse:\n error('can not decide problem type based on problem file location')\n problem_type = ProblemType.UNKNOWN\nop = orienteering_utils.SetOrienteeringProblemDefinition()\nop.load_problem_file(PROBLEM_FILE)\nnodes = op.nodes\nsets_prices = op.get_sets_prices()\nsets = op.get_sets()\noriginal_nodes = op.get_set_centers()\nresult_target_ids = record['RESULT_TARGET_IDS']\nresult_cluster_ids = record['RESULT_CLUSTER_IDS']\nresult_rewards = record['REWARDS']\nprint('problem loaded')\nprint('result_target_ids:', result_target_ids)\nprint('result_cluster_ids:', result_cluster_ids)\nprint('result_rewards', result_rewards)\nprint('sets_prices', sets_prices)\nprint('sets', sets)\nprint('nodes', nodes)\nresult_head_angs = []\nsampling_heading = len(sets[0])\ncalc_reward = 0\nfor clust_idx in range(len(result_cluster_ids)):\n clust = result_cluster_ids[clust_idx]\n node = result_target_ids[clust_idx]\n if problem_type == ProblemType.DOP:\n node_inside_cluster = node - sets[clust][0]\n head_ang = (math.pi + 2 * math.pi * node_inside_cluster /\n sampling_heading)\n result_head_angs.append(head_ang)\n calc_reward += sets_prices[clust]\n if node not in sets[clust]:\n print('what the hell, it is not good')\nprint('calc_reward', calc_reward)\nmycmap = plt.cm.get_cmap('RdYlBu_r')\nmaxx, maxy = -sys.float_info.max, -sys.float_info.max\nminx, miny = sys.float_info.max, sys.float_info.max\ncircle_radiuses = np.ones([len(nodes), 1])\ncircle_radiuses1 = np.multiply(2.0, circle_radiuses)\nnodes_w_rewards = np.zeros((len(nodes), 3))\nif problem_type == ProblemType.DOP:\n xses = [i[0] for i in original_nodes]\n yses = [i[1] for i in original_nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(original_nodes), 3))\n for nidx in range(len(original_nodes)):\n nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]\n nodes_w_rewards[nidx, 2] = sets_prices[nidx]\nelif problem_type == ProblemType.OPN:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = 
nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nelse:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nminrew = min(nodes_w_rewards[:, 2])\nmaxrew = max(nodes_w_rewards[:, 2])\ncNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew)\n )\nmycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)\nfig_width = FIG_HEIGHT * (maxx - minx) / (maxy - miny)\nfigsize = fig_width * 0.9, FIG_HEIGHT\nprint(figsize)\nfig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w',\n edgecolor='k')\ncircles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1],\n circle_radiuses1, c=nodes_w_rewards[:, 2], alpha=0.05, edgecolor=\n 'black', linewidth=0.9, linestyle=':')\nsc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=\n nodes_w_rewards[:, 2], cmap=mycmap, alpha=1.0, s=1, facecolor='black',\n lw=0.5)\nplt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)\nif problem_type == ProblemType.DOP:\n for nidx1 in range(len(nodes_w_rewards)):\n points = []\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for hind in range(sampling_heading):\n head_ang = math.pi + 2 * math.pi * hind / sampling_heading\n arrow_len = 30\n arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), \n arrow_len * math.sin(head_ang))\n set_rew = nodes_w_rewards[nidx1, 2]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)\nelif problem_type == ProblemType.OPN:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nelse:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nfor node_idx in range(1, len(result_target_ids)):\n if problem_type == ProblemType.DOP:\n step_size = 20\n turning_radius = op.dubins_radius\n node = result_cluster_ids[node_idx]\n node_prew = result_cluster_ids[node_idx - 1]\n q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],\n result_head_angs[node_idx]]\n q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][\n 1], result_head_angs[node_idx - 1]]\n path = dubins.shortest_path(q_start, q_end, turning_radius)\n qs, _ = path.sample_many(step_size)\n xses = [item[0] for item in qs]\n yses = [item[1] for item in 
qs]\n print(node_prew, '->', node, ',', q_start, '->', q_end)\n plt.plot(xses, yses, '-g', lw=1.6)\n elif problem_type == ProblemType.OPN:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\n else:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\nax = plt.gca()\nax.axis('equal')\nfigure_utils.no_axis(ax)\ncbar_position = [0.2, 0.05, 0.6, 0.03]\ncbar_ax = fig.add_axes(cbar_position)\ncb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')\ncb.ax.tick_params(labelsize=tick_font_size)\ncb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)\nfig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)\nplt.savefig(SAVE_TO_FIGURE, dpi=300)\nif SHOW_FIGURE:\n plt.show()\n",
"step-4": "import sys, os\nimport random\nimport numpy as np\nimport matplotlib as mpl\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport shapely.geometry as geometry\nfrom shapely.ops import cascaded_union, polygonize\nimport math\nfrom matplotlib.pyplot import arrow\nimport dubins\nthis_script_path = os.path.dirname(__file__)\npath_to_utils = os.path.join(this_script_path, 'utils')\nsys.path.append(path_to_utils)\nimport figure_utils\nimport orienteering_utils\nfrom orienteering_utils import ProblemType\nlegend_font_size = 24\ntick_font_size = 20\nNUM_POINTS_TO_GEN = 16\nSCATTER_SIZE = 80\nFIG_HEIGHT = 7.5\nSHOW_FIGURE = True\nRESULT_FILE = '../sources/results/results.log'\nRESULT_FILE = os.path.join(this_script_path, RESULT_FILE)\ndata_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)\nprint('using the last results')\nrecord = data_vns_sop[-1]\nprint('record', record)\nproblem_type = ProblemType.UNKNOWN\nPROBLEM_FILE = record['PROBLEM_FILE']\nPROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)\nif 'datasets/sop/' in PROBLEM_FILE:\n print('showing SOP')\n problem_type = ProblemType.SOP\n SAVE_TO_FIGURE = 'solution_sop.png'\nelif 'datasets/dop_sop_dataset/' in PROBLEM_FILE:\n print('showing DOP')\n problem_type = ProblemType.DOP\n SAVE_TO_FIGURE = 'solution_dop.png'\nelif 'datasets/opn_sop_dataset/' in PROBLEM_FILE:\n print('showing OPN')\n problem_type = ProblemType.OPN\n SAVE_TO_FIGURE = 'solution_opn.png'\nelse:\n error('can not decide problem type based on problem file location')\n problem_type = ProblemType.UNKNOWN\nop = orienteering_utils.SetOrienteeringProblemDefinition()\nop.load_problem_file(PROBLEM_FILE)\nnodes = op.nodes\nsets_prices = op.get_sets_prices()\nsets = op.get_sets()\noriginal_nodes = op.get_set_centers()\nresult_target_ids = record['RESULT_TARGET_IDS']\nresult_cluster_ids = record['RESULT_CLUSTER_IDS']\nresult_rewards = record['REWARDS']\nprint('problem loaded')\nprint('result_target_ids:', result_target_ids)\nprint('result_cluster_ids:', result_cluster_ids)\nprint('result_rewards', result_rewards)\nprint('sets_prices', sets_prices)\nprint('sets', sets)\nprint('nodes', nodes)\nresult_head_angs = []\nsampling_heading = len(sets[0])\ncalc_reward = 0\nfor clust_idx in range(len(result_cluster_ids)):\n clust = result_cluster_ids[clust_idx]\n node = result_target_ids[clust_idx]\n if problem_type == ProblemType.DOP:\n node_inside_cluster = node - sets[clust][0]\n head_ang = (math.pi + 2 * math.pi * node_inside_cluster /\n sampling_heading)\n result_head_angs.append(head_ang)\n calc_reward += sets_prices[clust]\n if node not in sets[clust]:\n print('what the hell, it is not good')\nprint('calc_reward', calc_reward)\nmycmap = plt.cm.get_cmap('RdYlBu_r')\nmaxx, maxy = -sys.float_info.max, -sys.float_info.max\nminx, miny = sys.float_info.max, sys.float_info.max\ncircle_radiuses = np.ones([len(nodes), 1])\ncircle_radiuses1 = np.multiply(2.0, circle_radiuses)\nnodes_w_rewards = np.zeros((len(nodes), 3))\nif problem_type == ProblemType.DOP:\n xses = [i[0] for i in original_nodes]\n yses = [i[1] for i in original_nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(original_nodes), 3))\n for nidx in range(len(original_nodes)):\n nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]\n 
nodes_w_rewards[nidx, 2] = sets_prices[nidx]\nelif problem_type == ProblemType.OPN:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nelse:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nminrew = min(nodes_w_rewards[:, 2])\nmaxrew = max(nodes_w_rewards[:, 2])\ncNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew)\n )\nmycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)\nfig_width = FIG_HEIGHT * (maxx - minx) / (maxy - miny)\nfigsize = fig_width * 0.9, FIG_HEIGHT\nprint(figsize)\nfig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w',\n edgecolor='k')\ncircles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1],\n circle_radiuses1, c=nodes_w_rewards[:, 2], alpha=0.05, edgecolor=\n 'black', linewidth=0.9, linestyle=':')\nsc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=\n nodes_w_rewards[:, 2], cmap=mycmap, alpha=1.0, s=1, facecolor='black',\n lw=0.5)\nplt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)\nif problem_type == ProblemType.DOP:\n for nidx1 in range(len(nodes_w_rewards)):\n points = []\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for hind in range(sampling_heading):\n head_ang = math.pi + 2 * math.pi * hind / sampling_heading\n arrow_len = 30\n arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), \n arrow_len * math.sin(head_ang))\n set_rew = nodes_w_rewards[nidx1, 2]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)\nelif problem_type == ProblemType.OPN:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nelse:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]:\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]:\n if nidx1 != nidx2:\n node2 = nodes_w_rewards[nidx2, :]\n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\nfor node_idx in range(1, len(result_target_ids)):\n if problem_type == ProblemType.DOP:\n step_size = 20\n turning_radius = op.dubins_radius\n node = result_cluster_ids[node_idx]\n node_prew = 
result_cluster_ids[node_idx - 1]\n q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1],\n result_head_angs[node_idx]]\n q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][\n 1], result_head_angs[node_idx - 1]]\n path = dubins.shortest_path(q_start, q_end, turning_radius)\n qs, _ = path.sample_many(step_size)\n xses = [item[0] for item in qs]\n yses = [item[1] for item in qs]\n print(node_prew, '->', node, ',', q_start, '->', q_end)\n plt.plot(xses, yses, '-g', lw=1.6)\n elif problem_type == ProblemType.OPN:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\n else:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, ',', node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0]], [node_pos_prew[1],\n node_pos[1]], '-g', lw=1.6)\nax = plt.gca()\nax.axis('equal')\nfigure_utils.no_axis(ax)\ncbar_position = [0.2, 0.05, 0.6, 0.03]\ncbar_ax = fig.add_axes(cbar_position)\ncb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')\ncb.ax.tick_params(labelsize=tick_font_size)\ncb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)\nfig.subplots_adjust(left=-0.035, right=1.035, top=1.07, bottom=0.0)\nplt.savefig(SAVE_TO_FIGURE, dpi=300)\nif SHOW_FIGURE:\n plt.show()\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys, os\nimport random\nimport numpy as np\n\nimport matplotlib as mpl\nif os.environ.get('DISPLAY','') == '':\n print('no display found. Using non-interactive Agg backend')\n mpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport shapely.geometry as geometry\nfrom shapely.ops import cascaded_union, polygonize\nimport math\nfrom matplotlib.pyplot import arrow\nimport dubins\nthis_script_path = os.path.dirname(__file__) \npath_to_utils = os.path.join(this_script_path, \"utils\") \nsys.path.append(path_to_utils)\nimport figure_utils\nimport orienteering_utils\nfrom orienteering_utils import ProblemType\n\n\nlegend_font_size = 24\ntick_font_size = 20\nNUM_POINTS_TO_GEN = 16\nSCATTER_SIZE = 80\nFIG_HEIGHT = 7.5\nSHOW_FIGURE = True\n\nRESULT_FILE = \"../sources/results/results.log\"\nRESULT_FILE = os.path.join(this_script_path, RESULT_FILE)\n \n#use nice latex fonts if latex is installed\n#figure_utils.configure_latex_fonts_latex()\n\ndata_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)\n\nprint(\"using the last results\")\nrecord = data_vns_sop[-1]\nprint(\"record\", record)\n\nproblem_type = ProblemType.UNKNOWN\n\nPROBLEM_FILE = record['PROBLEM_FILE']\nPROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)\n\nif \"datasets/sop/\" in PROBLEM_FILE:\n print(\"showing SOP\")\n problem_type = ProblemType.SOP\n SAVE_TO_FIGURE = \"solution_sop.png\"\n\nelif \"datasets/dop_sop_dataset/\" in PROBLEM_FILE:\n print(\"showing DOP\")\n problem_type = ProblemType.DOP\n SAVE_TO_FIGURE = \"solution_dop.png\"\n\nelif \"datasets/opn_sop_dataset/\" in PROBLEM_FILE:\n print(\"showing OPN\")\n problem_type = ProblemType.OPN\n SAVE_TO_FIGURE = \"solution_opn.png\"\n \nelse:\n error(\"can not decide problem type based on problem file location\")\n problem_type = ProblemType.UNKNOWN\n\nop = orienteering_utils.SetOrienteeringProblemDefinition()\nop.load_problem_file(PROBLEM_FILE)\nnodes = op.nodes\nsets_prices = op.get_sets_prices()\nsets = op.get_sets()\noriginal_nodes = op.get_set_centers()\n\nresult_target_ids = record['RESULT_TARGET_IDS']\nresult_cluster_ids = record['RESULT_CLUSTER_IDS']\nresult_rewards = record['REWARDS']\nprint(\"problem loaded\")\nprint(\"result_target_ids:\", result_target_ids)\nprint(\"result_cluster_ids:\", result_cluster_ids)\nprint(\"result_rewards\", result_rewards)\nprint(\"sets_prices\", sets_prices)\nprint(\"sets\", sets)\nprint(\"nodes\", nodes)\n\n# for the DOP only\nresult_head_angs = []\nsampling_heading = len(sets[0])\n\ncalc_reward = 0\nfor clust_idx in range(len(result_cluster_ids)):\n clust = result_cluster_ids[clust_idx]\n node = result_target_ids[clust_idx]\n\n if problem_type == ProblemType.DOP:\n node_inside_cluster = node - sets[clust][0]\n # result_node_inside_cluster.append(node_inside_cluster)\n head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading\n result_head_angs.append(head_ang)\n\n calc_reward += sets_prices[clust]\n if node not in sets[clust]:\n print(\"what the hell, it is not good\")\n\nprint(\"calc_reward\", calc_reward)\n\nmycmap = plt.cm.get_cmap('RdYlBu_r')\n\nmaxx, maxy = -sys.float_info.max,-sys.float_info.max\nminx, miny = sys.float_info.max,sys.float_info.max\n\ncircle_radiuses = np.ones([len(nodes), 1])\ncircle_radiuses1 = np.multiply(2.0, circle_radiuses)\n\nnodes_w_rewards = np.zeros((len(nodes), 3))\nif problem_type == ProblemType.DOP:\n xses = [i[0] for i in original_nodes]\n yses = [i[1] for i in 
original_nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n \n nodes_w_rewards = np.zeros((len(original_nodes), 3))\n for nidx in range(len(original_nodes)):\n nodes_w_rewards[nidx, 0] = original_nodes[nidx][0] \n nodes_w_rewards[nidx, 1] = original_nodes[nidx][1] \n nodes_w_rewards[nidx, 2] = sets_prices[nidx]\nelif problem_type == ProblemType.OPN :\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n \n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n \n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\nelse:\n xses = [nodes[i][0] for i in nodes]\n yses = [nodes[i][1] for i in nodes]\n maxx = max(xses)\n minx = min(xses)\n maxy = max(yses)\n miny = min(yses)\n \n nodes_w_rewards = np.zeros((len(nodes), 3))\n for nidx in nodes:\n nodes_w_rewards[nidx, 0] = nodes[nidx][0]\n nodes_w_rewards[nidx, 1] = nodes[nidx][1]\n\n for set_idx in sets:\n if nidx in sets[set_idx]:\n nodes_w_rewards[nidx, 2] = sets_prices[set_idx]\n break\n\nminrew = min(nodes_w_rewards[:, 2])\nmaxrew = max(nodes_w_rewards[:, 2])\n\n\ncNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew)) \nmycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)\n\nfig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)\nfigsize = (fig_width*0.9,FIG_HEIGHT)\nprint(figsize)\n\nfig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')\ncircles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')\nsc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)\nplt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)\n\n# print(nodes_w_rewards[:, 2])\n\nif problem_type == ProblemType.DOP:\n for nidx1 in range(len(nodes_w_rewards)): \n points = []\n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n \n for hind in range(sampling_heading):\n head_ang = math.pi + (2 * math.pi * hind) / sampling_heading\n arrow_len = 30\n arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))\n \n set_rew = nodes_w_rewards[nidx1, 2] \n \n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha) \n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)\nelif problem_type == ProblemType.OPN:\n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]: \n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]: \n if(nidx1 != nidx2):\n node2 = nodes_w_rewards[nidx2, :]\n # plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)\n \n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n \n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\n\nelse: \n for set_idx in reversed(sorted(sets.keys())):\n points = []\n set_rew = sets_prices[set_idx]\n for nidx1 in sets[set_idx]: \n node1 = nodes_w_rewards[nidx1, :]\n points.append([node1[0], node1[1]])\n for nidx2 in sets[set_idx]: \n if(nidx1 != 
nidx2):\n node2 = nodes_w_rewards[nidx2, :]\n # plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)\n \n alpha = 0.0\n concave_hull = figure_utils.alpha_shape(points, alpha=alpha)\n \n color = mycmapScalarMap.to_rgba(set_rew)\n figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)\n \n\nfor node_idx in range(1, len(result_target_ids)):\n \n if problem_type == ProblemType.DOP:\n step_size = 20\n turning_radius = op.dubins_radius\n node = result_cluster_ids[node_idx]\n node_prew = result_cluster_ids[node_idx - 1]\n q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result_head_angs[node_idx]]\n q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1], result_head_angs[node_idx - 1]]\n path = dubins.shortest_path(q_start, q_end, turning_radius)\n qs, _ = path.sample_many(step_size)\n # length_dub += math.ceil(path.path_length())\n xses = [item[0] for item in qs]\n yses = [item[1] for item in qs]\n print(node_prew, '->', node, \",\", q_start, '->', q_end)\n plt.plot(xses, yses, '-g', lw=1.6)\n \n elif problem_type == ProblemType.OPN:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, \",\", node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)\n\n else:\n node = result_target_ids[node_idx]\n node_prew = result_target_ids[node_idx - 1]\n node_pos = [nodes[node][0], nodes[node][1]]\n node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]\n print(node_prew, '->', node, \",\", node_pos_prew, '->', node_pos)\n plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)\n\nax = plt.gca()\nax.axis('equal')\nfigure_utils.no_axis(ax)\n\ncbar_position = [0.20, 0.05, 0.6, 0.03]\ncbar_ax = fig.add_axes(cbar_position)\ncb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')\ncb.ax.tick_params(labelsize=tick_font_size)\ncb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)\n\n# offset = 0.08\nfig.subplots_adjust(left=-0.035, right=1.035 , top=1.07 , bottom=0.0)\n\nplt.savefig(SAVE_TO_FIGURE, dpi=300)\nif SHOW_FIGURE:\n plt.show() \n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MoveDigState(State):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MoveDigState(State):
def __init__(self):
super().__init__('MoveDig', 'ScanDig')
self.transitionReady = False
self.digSiteDistance = 0
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MoveDigState(State):
def __init__(self):
super().__init__('MoveDig', 'ScanDig')
self.transitionReady = False
self.digSiteDistance = 0
def run(self, moveInstructions):
print('\n>run() not implemented\n')
self.transitionReady = False
self.transitionReady = True
def transition(self):
return self.transitionReady
<|reserved_special_token_1|>
from states.state import State
class MoveDigState(State):
def __init__(self):
super().__init__('MoveDig', 'ScanDig')
self.transitionReady = False
self.digSiteDistance = 0
def run(self, moveInstructions):
print('\n>run() not implemented\n')
self.transitionReady = False
self.transitionReady = True
def transition(self):
return self.transitionReady
<|reserved_special_token_1|>
from states.state import State
class MoveDigState(State):
#init attributes of state
def __init__(self):
super().__init__("MoveDig", "ScanDig")
self.transitionReady = False
self.digSiteDistance = 0
#implementation for each state: overridden
def run(self, moveInstructions):
print("\n>run() not implemented\n")
#always begin with no transition
self.transitionReady = False
#track distance
#execute move instructions
#when in dig site then
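        # a hypothetical sketch (the moveInstructions format is not defined in
        # this file): accumulate traveled distance and flip the flag once the
        # dig site is reached, e.g.
        #   self.digSiteDistance -= moveInstructions.distance
        #   if self.digSiteDistance <= 0:
        #       self.transitionReady = True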
self.transitionReady = True
#implementation for each state: overridden
def transition(self):
return self.transitionReady
|
flexible
|
{
"blob_id": "ce4ecff2012cfda4a458912713b0330a218fa186",
"index": 873,
"step-1": "<mask token>\n\n\nclass MoveDigState(State):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MoveDigState(State):\n\n def __init__(self):\n super().__init__('MoveDig', 'ScanDig')\n self.transitionReady = False\n self.digSiteDistance = 0\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MoveDigState(State):\n\n def __init__(self):\n super().__init__('MoveDig', 'ScanDig')\n self.transitionReady = False\n self.digSiteDistance = 0\n\n def run(self, moveInstructions):\n print('\\n>run() not implemented\\n')\n self.transitionReady = False\n self.transitionReady = True\n\n def transition(self):\n return self.transitionReady\n",
"step-4": "from states.state import State\n\n\nclass MoveDigState(State):\n\n def __init__(self):\n super().__init__('MoveDig', 'ScanDig')\n self.transitionReady = False\n self.digSiteDistance = 0\n\n def run(self, moveInstructions):\n print('\\n>run() not implemented\\n')\n self.transitionReady = False\n self.transitionReady = True\n\n def transition(self):\n return self.transitionReady\n",
"step-5": "from states.state import State\n\nclass MoveDigState(State):\n #init attributes of state\n def __init__(self):\n super().__init__(\"MoveDig\", \"ScanDig\")\n self.transitionReady = False\n self.digSiteDistance = 0\n\n #implementation for each state: overridden\n def run(self, moveInstructions):\n print(\"\\n>run() not implemented\\n\")\n \n #always begin with no transition\n self.transitionReady = False\n\n #track distance\n #execute move instructions \n #when in dig site then\n self.transitionReady = True\n\n #implementation for each state: overridden\n def transition(self):\n return self.transitionReady",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
from collections import defaultdict
def solve(n, seq):
flag = True
# slot = [0] * (n + 10)
freq = defaultdict()
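    # note: defaultdict() without a default_factory behaves like a plain dict;
    # every lookup below is guarded with `in freq.keys()`, so no factory is needed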
# refer to next free slot
i = 1
p = len(seq)
j = 0
while j < p:
c = seq[j]
if i > n:
flag = False
break
if c in freq.keys():
if freq[c] == 1:
freq[c] = 0
i -= 1
else:
freq[c] = 1
i += 1
if c not in freq.keys():
freq[c] = 1
i += 1
j += 1
if flag == True:
return 0
else:
return 1
# number of computers
n = int(input())
seq = input()
if solve(n, seq) == 0:  # solve() returns 0 when the capacity constraint holds
print("Satisfied")
else:
print("Not Satisfied")
|
normal
|
{
"blob_id": "89b03bb5ca86e426459e23866f86f8770e4a1613",
"index": 3420,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solve(n, seq):\n flag = True\n freq = defaultdict()\n i = 1\n p = len(seq)\n j = 0\n while j < p:\n c = seq[j]\n if i > n:\n flag = False\n break\n if c in freq.keys():\n if freq[c] == 1:\n freq[c] = 0\n i -= 1\n else:\n freq[c] = 1\n i += 1\n if c not in freq.keys():\n freq[c] = 1\n i += 1\n j += 1\n if flag == True:\n return 0\n else:\n return 1\n\n\n<mask token>\nif solve(n, seq):\n print('Satisfied')\nelse:\n print('Not Satisfied')\n",
"step-3": "<mask token>\n\n\ndef solve(n, seq):\n flag = True\n freq = defaultdict()\n i = 1\n p = len(seq)\n j = 0\n while j < p:\n c = seq[j]\n if i > n:\n flag = False\n break\n if c in freq.keys():\n if freq[c] == 1:\n freq[c] = 0\n i -= 1\n else:\n freq[c] = 1\n i += 1\n if c not in freq.keys():\n freq[c] = 1\n i += 1\n j += 1\n if flag == True:\n return 0\n else:\n return 1\n\n\nn = int(input())\nseq = input()\nif solve(n, seq):\n print('Satisfied')\nelse:\n print('Not Satisfied')\n",
"step-4": "from collections import defaultdict\n\n\ndef solve(n, seq):\n flag = True\n freq = defaultdict()\n i = 1\n p = len(seq)\n j = 0\n while j < p:\n c = seq[j]\n if i > n:\n flag = False\n break\n if c in freq.keys():\n if freq[c] == 1:\n freq[c] = 0\n i -= 1\n else:\n freq[c] = 1\n i += 1\n if c not in freq.keys():\n freq[c] = 1\n i += 1\n j += 1\n if flag == True:\n return 0\n else:\n return 1\n\n\nn = int(input())\nseq = input()\nif solve(n, seq):\n print('Satisfied')\nelse:\n print('Not Satisfied')\n",
"step-5": "from collections import defaultdict\n\ndef solve(n, seq):\n flag = True\n # slot = [0] * (n + 10)\n freq = defaultdict()\n\n # refer to next free slot\n i = 1\n p = len(seq)\n j = 0\n\n while j < p:\n c = seq[j]\n if i > n:\n flag = False\n break\n if c in freq.keys():\n if freq[c] == 1:\n freq[c] = 0\n i -= 1\n else:\n freq[c] = 1\n i += 1\n if c not in freq.keys():\n freq[c] = 1\n i += 1\n j += 1\n\n if flag == True:\n return 0\n else:\n return 1 \n\n\n# number of computers\nn = int(input())\nseq = input()\n\nif solve(n, seq):\n print(\"Satisfied\")\nelse:\n print(\"Not Satisfied\")",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class build_ext_and_proto(build_ext):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if not CUDA_HOME:
path_to_cuda_gdb = shutil.which('cuda-gdb')
if path_to_cuda_gdb is None:
raise OSError(
'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'
)
CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))
if not os.path.isdir(CUDA_HOME):
raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')
<|reserved_special_token_0|>
try:
nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')
except Exception:
nthreads = 0
<|reserved_special_token_0|>
class build_ext_and_proto(build_ext):
def run(self):
protoc = None
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
else:
protoc = find_executable('protoc')
if protoc is None:
sys.stderr.write('protoc not found')
sys.exit(1)
for source in ['cudf/utils/metadata/orc_column_statistics.proto']:
output = source.replace('.proto', '_pb2.py')
if not os.path.exists(output) or os.path.getmtime(source
) > os.path.getmtime(output):
with open(output, 'a') as src:
src.write('# flake8: noqa' + os.linesep)
src.write('# fmt: off' + os.linesep)
subprocess.check_call([protoc, '--python_out=.', source])
with open(output, 'r+') as src:
new_src_content = ('# flake8: noqa' + os.linesep +
'# fmt: off' + os.linesep + src.read() +
'# fmt: on' + os.linesep)
src.seek(0)
src.write(new_src_content)
build_ext.run(self)
<|reserved_special_token_0|>
setup(name='cudf', version=versioneer.get_version(), description=
'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=
'NVIDIA Corporation', license='Apache 2.0', classifiers=[
'Intended Audience :: Developers', 'Topic :: Database',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'], setup_requires=['cython',
'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,
compiler_directives=dict(profile=False, language_level=3,
embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'
]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [
'*.pxd']), cmdclass=cmdclass, install_requires=install_requires,
zip_safe=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
install_requires = ['numba', 'cython']
cython_files = ['cudf/**/*.pyx']
CUDA_HOME = os.environ.get('CUDA_HOME', False)
if not CUDA_HOME:
path_to_cuda_gdb = shutil.which('cuda-gdb')
if path_to_cuda_gdb is None:
raise OSError(
'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'
)
CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))
if not os.path.isdir(CUDA_HOME):
raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')
cuda_include_dir = os.path.join(CUDA_HOME, 'include')
CUDF_ROOT = os.environ.get('CUDF_ROOT', '../../cpp/build/')
try:
nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')
except Exception:
nthreads = 0
cmdclass = versioneer.get_cmdclass()
class build_ext_and_proto(build_ext):
def run(self):
protoc = None
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
else:
protoc = find_executable('protoc')
if protoc is None:
sys.stderr.write('protoc not found')
sys.exit(1)
for source in ['cudf/utils/metadata/orc_column_statistics.proto']:
output = source.replace('.proto', '_pb2.py')
if not os.path.exists(output) or os.path.getmtime(source
) > os.path.getmtime(output):
with open(output, 'a') as src:
src.write('# flake8: noqa' + os.linesep)
src.write('# fmt: off' + os.linesep)
subprocess.check_call([protoc, '--python_out=.', source])
with open(output, 'r+') as src:
new_src_content = ('# flake8: noqa' + os.linesep +
'# fmt: off' + os.linesep + src.read() +
'# fmt: on' + os.linesep)
src.seek(0)
src.write(new_src_content)
build_ext.run(self)
cmdclass['build_ext'] = build_ext_and_proto
extensions = [Extension('*', sources=cython_files, include_dirs=[
'../../cpp/include/cudf', '../../cpp/include', os.path.join(CUDF_ROOT,
'include'), os.path.join(CUDF_ROOT, '_deps/libcudacxx-src/include'), os
.path.join(os.path.dirname(sysconfig.get_path('include')),
'libcudf/libcudacxx'), os.path.dirname(sysconfig.get_path('include')),
np.get_include(), pa.get_include(), cuda_include_dir], library_dirs=pa.
get_library_dirs() + [get_python_lib(), os.path.join(os.sys.prefix,
'lib')], libraries=['cudf'] + pa.get_libraries() + ['arrow_cuda'],
language='c++', extra_compile_args=['-std=c++14'])]
setup(name='cudf', version=versioneer.get_version(), description=
'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=
'NVIDIA Corporation', license='Apache 2.0', classifiers=[
'Intended Audience :: Developers', 'Topic :: Database',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'], setup_requires=['cython',
'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,
compiler_directives=dict(profile=False, language_level=3,
embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'
]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [
'*.pxd']), cmdclass=cmdclass, install_requires=install_requires,
zip_safe=False)
<|reserved_special_token_1|>
import os
import shutil
import subprocess
import sys
import sysconfig
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_lib
import numpy as np
import pyarrow as pa
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from setuptools import find_packages, setup
from setuptools.extension import Extension
import versioneer
install_requires = ['numba', 'cython']
cython_files = ['cudf/**/*.pyx']
CUDA_HOME = os.environ.get('CUDA_HOME', False)
if not CUDA_HOME:
path_to_cuda_gdb = shutil.which('cuda-gdb')
if path_to_cuda_gdb is None:
raise OSError(
'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'
)
CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))
if not os.path.isdir(CUDA_HOME):
raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')
cuda_include_dir = os.path.join(CUDA_HOME, 'include')
CUDF_ROOT = os.environ.get('CUDF_ROOT', '../../cpp/build/')
try:
nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')
except Exception:
nthreads = 0
cmdclass = versioneer.get_cmdclass()
class build_ext_and_proto(build_ext):
def run(self):
protoc = None
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
else:
protoc = find_executable('protoc')
if protoc is None:
sys.stderr.write('protoc not found')
sys.exit(1)
for source in ['cudf/utils/metadata/orc_column_statistics.proto']:
output = source.replace('.proto', '_pb2.py')
if not os.path.exists(output) or os.path.getmtime(source
) > os.path.getmtime(output):
with open(output, 'a') as src:
src.write('# flake8: noqa' + os.linesep)
src.write('# fmt: off' + os.linesep)
subprocess.check_call([protoc, '--python_out=.', source])
with open(output, 'r+') as src:
new_src_content = ('# flake8: noqa' + os.linesep +
'# fmt: off' + os.linesep + src.read() +
'# fmt: on' + os.linesep)
src.seek(0)
src.write(new_src_content)
build_ext.run(self)
cmdclass['build_ext'] = build_ext_and_proto
extensions = [Extension('*', sources=cython_files, include_dirs=[
'../../cpp/include/cudf', '../../cpp/include', os.path.join(CUDF_ROOT,
'include'), os.path.join(CUDF_ROOT, '_deps/libcudacxx-src/include'), os
.path.join(os.path.dirname(sysconfig.get_path('include')),
'libcudf/libcudacxx'), os.path.dirname(sysconfig.get_path('include')),
np.get_include(), pa.get_include(), cuda_include_dir], library_dirs=pa.
get_library_dirs() + [get_python_lib(), os.path.join(os.sys.prefix,
'lib')], libraries=['cudf'] + pa.get_libraries() + ['arrow_cuda'],
language='c++', extra_compile_args=['-std=c++14'])]
setup(name='cudf', version=versioneer.get_version(), description=
'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=
'NVIDIA Corporation', license='Apache 2.0', classifiers=[
'Intended Audience :: Developers', 'Topic :: Database',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'], setup_requires=['cython',
'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,
compiler_directives=dict(profile=False, language_level=3,
embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'
]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [
'*.pxd']), cmdclass=cmdclass, install_requires=install_requires,
zip_safe=False)
<|reserved_special_token_1|>
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
import os
import shutil
import subprocess
import sys
import sysconfig
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_lib
import numpy as np
import pyarrow as pa
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from setuptools import find_packages, setup
from setuptools.extension import Extension
import versioneer
install_requires = ["numba", "cython"]
cython_files = ["cudf/**/*.pyx"]
CUDA_HOME = os.environ.get("CUDA_HOME", False)
if not CUDA_HOME:
path_to_cuda_gdb = shutil.which("cuda-gdb")
if path_to_cuda_gdb is None:
raise OSError(
"Could not locate CUDA. "
"Please set the environment variable "
"CUDA_HOME to the path to the CUDA installation "
"and try again."
)
CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))
if not os.path.isdir(CUDA_HOME):
raise OSError(f"Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}")
cuda_include_dir = os.path.join(CUDA_HOME, "include")
CUDF_ROOT = os.environ.get("CUDF_ROOT", "../../cpp/build/")
try:
nthreads = int(os.environ.get("PARALLEL_LEVEL", "0") or "0")
except Exception:
nthreads = 0
cmdclass = versioneer.get_cmdclass()
class build_ext_and_proto(build_ext):
def run(self):
# Get protoc
protoc = None
if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
protoc = os.environ["PROTOC"]
else:
protoc = find_executable("protoc")
if protoc is None:
sys.stderr.write("protoc not found")
sys.exit(1)
# Build .proto file
for source in ["cudf/utils/metadata/orc_column_statistics.proto"]:
output = source.replace(".proto", "_pb2.py")
if not os.path.exists(output) or (
os.path.getmtime(source) > os.path.getmtime(output)
):
with open(output, "a") as src:
src.write("# flake8: noqa" + os.linesep)
src.write("# fmt: off" + os.linesep)
subprocess.check_call([protoc, "--python_out=.", source])
with open(output, "r+") as src:
new_src_content = (
"# flake8: noqa"
+ os.linesep
+ "# fmt: off"
+ os.linesep
+ src.read()
+ "# fmt: on"
+ os.linesep
)
src.seek(0)
src.write(new_src_content)
# Run original Cython build_ext command
build_ext.run(self)
cmdclass["build_ext"] = build_ext_and_proto
extensions = [
Extension(
"*",
sources=cython_files,
include_dirs=[
"../../cpp/include/cudf",
"../../cpp/include",
os.path.join(CUDF_ROOT, "include"),
os.path.join(CUDF_ROOT, "_deps/libcudacxx-src/include"),
os.path.join(
os.path.dirname(sysconfig.get_path("include")),
"libcudf/libcudacxx",
),
os.path.dirname(sysconfig.get_path("include")),
np.get_include(),
pa.get_include(),
cuda_include_dir,
],
library_dirs=(
pa.get_library_dirs()
+ [get_python_lib(), os.path.join(os.sys.prefix, "lib")]
),
libraries=["cudf"] + pa.get_libraries() + ["arrow_cuda"],
language="c++",
extra_compile_args=["-std=c++14"],
)
]
setup(
name="cudf",
version=versioneer.get_version(),
description="cuDF - GPU Dataframe",
url="https://github.com/rapidsai/cudf",
author="NVIDIA Corporation",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
# Include the separately-compiled shared library
setup_requires=["cython", "protobuf"],
ext_modules=cythonize(
extensions,
nthreads=nthreads,
compiler_directives=dict(
profile=False, language_level=3, embedsignature=True
),
),
packages=find_packages(include=["cudf", "cudf.*"]),
package_data=dict.fromkeys(
find_packages(include=["cudf._lib*"]), ["*.pxd"],
),
cmdclass=cmdclass,
install_requires=install_requires,
zip_safe=False,
)
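A minimal sketch of supplying the environment knobs the setup script above reads when building in place; the paths are illustrative assumptions, not required locations:

# drive the setup script with explicit env vars; values are examples only
import os
import subprocess
import sys

env = dict(os.environ)
env["CUDA_HOME"] = "/usr/local/cuda"   # skips the cuda-gdb autodetection
env["CUDF_ROOT"] = "../../cpp/build/"  # where the libcudf C++ build lives
env["PARALLEL_LEVEL"] = "8"            # becomes nthreads for cythonize()
subprocess.check_call(
    [sys.executable, "setup.py", "build_ext", "--inplace"], env=env)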
|
flexible
|
{
"blob_id": "b3095f181032727544ce3ee6f1ad3a70976c0061",
"index": 7892,
"step-1": "<mask token>\n\n\nclass build_ext_and_proto(build_ext):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\nif not CUDA_HOME:\n path_to_cuda_gdb = shutil.which('cuda-gdb')\n if path_to_cuda_gdb is None:\n raise OSError(\n 'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'\n )\n CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))\nif not os.path.isdir(CUDA_HOME):\n raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')\n<mask token>\ntry:\n nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')\nexcept Exception:\n nthreads = 0\n<mask token>\n\n\nclass build_ext_and_proto(build_ext):\n\n def run(self):\n protoc = None\n if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):\n protoc = os.environ['PROTOC']\n else:\n protoc = find_executable('protoc')\n if protoc is None:\n sys.stderr.write('protoc not found')\n sys.exit(1)\n for source in ['cudf/utils/metadata/orc_column_statistics.proto']:\n output = source.replace('.proto', '_pb2.py')\n if not os.path.exists(output) or os.path.getmtime(source\n ) > os.path.getmtime(output):\n with open(output, 'a') as src:\n src.write('# flake8: noqa' + os.linesep)\n src.write('# fmt: off' + os.linesep)\n subprocess.check_call([protoc, '--python_out=.', source])\n with open(output, 'r+') as src:\n new_src_content = ('# flake8: noqa' + os.linesep +\n '# fmt: off' + os.linesep + src.read() +\n '# fmt: on' + os.linesep)\n src.seek(0)\n src.write(new_src_content)\n build_ext.run(self)\n\n\n<mask token>\nsetup(name='cudf', version=versioneer.get_version(), description=\n 'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=\n 'NVIDIA Corporation', license='Apache 2.0', classifiers=[\n 'Intended Audience :: Developers', 'Topic :: Database',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'], setup_requires=['cython',\n 'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,\n compiler_directives=dict(profile=False, language_level=3,\n embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'\n ]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [\n '*.pxd']), cmdclass=cmdclass, install_requires=install_requires,\n zip_safe=False)\n",
"step-3": "<mask token>\ninstall_requires = ['numba', 'cython']\ncython_files = ['cudf/**/*.pyx']\nCUDA_HOME = os.environ.get('CUDA_HOME', False)\nif not CUDA_HOME:\n path_to_cuda_gdb = shutil.which('cuda-gdb')\n if path_to_cuda_gdb is None:\n raise OSError(\n 'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'\n )\n CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))\nif not os.path.isdir(CUDA_HOME):\n raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')\ncuda_include_dir = os.path.join(CUDA_HOME, 'include')\nCUDF_ROOT = os.environ.get('CUDF_ROOT', '../../cpp/build/')\ntry:\n nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')\nexcept Exception:\n nthreads = 0\ncmdclass = versioneer.get_cmdclass()\n\n\nclass build_ext_and_proto(build_ext):\n\n def run(self):\n protoc = None\n if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):\n protoc = os.environ['PROTOC']\n else:\n protoc = find_executable('protoc')\n if protoc is None:\n sys.stderr.write('protoc not found')\n sys.exit(1)\n for source in ['cudf/utils/metadata/orc_column_statistics.proto']:\n output = source.replace('.proto', '_pb2.py')\n if not os.path.exists(output) or os.path.getmtime(source\n ) > os.path.getmtime(output):\n with open(output, 'a') as src:\n src.write('# flake8: noqa' + os.linesep)\n src.write('# fmt: off' + os.linesep)\n subprocess.check_call([protoc, '--python_out=.', source])\n with open(output, 'r+') as src:\n new_src_content = ('# flake8: noqa' + os.linesep +\n '# fmt: off' + os.linesep + src.read() +\n '# fmt: on' + os.linesep)\n src.seek(0)\n src.write(new_src_content)\n build_ext.run(self)\n\n\ncmdclass['build_ext'] = build_ext_and_proto\nextensions = [Extension('*', sources=cython_files, include_dirs=[\n '../../cpp/include/cudf', '../../cpp/include', os.path.join(CUDF_ROOT,\n 'include'), os.path.join(CUDF_ROOT, '_deps/libcudacxx-src/include'), os\n .path.join(os.path.dirname(sysconfig.get_path('include')),\n 'libcudf/libcudacxx'), os.path.dirname(sysconfig.get_path('include')),\n np.get_include(), pa.get_include(), cuda_include_dir], library_dirs=pa.\n get_library_dirs() + [get_python_lib(), os.path.join(os.sys.prefix,\n 'lib')], libraries=['cudf'] + pa.get_libraries() + ['arrow_cuda'],\n language='c++', extra_compile_args=['-std=c++14'])]\nsetup(name='cudf', version=versioneer.get_version(), description=\n 'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=\n 'NVIDIA Corporation', license='Apache 2.0', classifiers=[\n 'Intended Audience :: Developers', 'Topic :: Database',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'], setup_requires=['cython',\n 'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,\n compiler_directives=dict(profile=False, language_level=3,\n embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'\n ]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [\n '*.pxd']), cmdclass=cmdclass, install_requires=install_requires,\n zip_safe=False)\n",
"step-4": "import os\nimport shutil\nimport subprocess\nimport sys\nimport sysconfig\nfrom distutils.spawn import find_executable\nfrom distutils.sysconfig import get_python_lib\nimport numpy as np\nimport pyarrow as pa\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\nfrom setuptools import find_packages, setup\nfrom setuptools.extension import Extension\nimport versioneer\ninstall_requires = ['numba', 'cython']\ncython_files = ['cudf/**/*.pyx']\nCUDA_HOME = os.environ.get('CUDA_HOME', False)\nif not CUDA_HOME:\n path_to_cuda_gdb = shutil.which('cuda-gdb')\n if path_to_cuda_gdb is None:\n raise OSError(\n 'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'\n )\n CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))\nif not os.path.isdir(CUDA_HOME):\n raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')\ncuda_include_dir = os.path.join(CUDA_HOME, 'include')\nCUDF_ROOT = os.environ.get('CUDF_ROOT', '../../cpp/build/')\ntry:\n nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')\nexcept Exception:\n nthreads = 0\ncmdclass = versioneer.get_cmdclass()\n\n\nclass build_ext_and_proto(build_ext):\n\n def run(self):\n protoc = None\n if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):\n protoc = os.environ['PROTOC']\n else:\n protoc = find_executable('protoc')\n if protoc is None:\n sys.stderr.write('protoc not found')\n sys.exit(1)\n for source in ['cudf/utils/metadata/orc_column_statistics.proto']:\n output = source.replace('.proto', '_pb2.py')\n if not os.path.exists(output) or os.path.getmtime(source\n ) > os.path.getmtime(output):\n with open(output, 'a') as src:\n src.write('# flake8: noqa' + os.linesep)\n src.write('# fmt: off' + os.linesep)\n subprocess.check_call([protoc, '--python_out=.', source])\n with open(output, 'r+') as src:\n new_src_content = ('# flake8: noqa' + os.linesep +\n '# fmt: off' + os.linesep + src.read() +\n '# fmt: on' + os.linesep)\n src.seek(0)\n src.write(new_src_content)\n build_ext.run(self)\n\n\ncmdclass['build_ext'] = build_ext_and_proto\nextensions = [Extension('*', sources=cython_files, include_dirs=[\n '../../cpp/include/cudf', '../../cpp/include', os.path.join(CUDF_ROOT,\n 'include'), os.path.join(CUDF_ROOT, '_deps/libcudacxx-src/include'), os\n .path.join(os.path.dirname(sysconfig.get_path('include')),\n 'libcudf/libcudacxx'), os.path.dirname(sysconfig.get_path('include')),\n np.get_include(), pa.get_include(), cuda_include_dir], library_dirs=pa.\n get_library_dirs() + [get_python_lib(), os.path.join(os.sys.prefix,\n 'lib')], libraries=['cudf'] + pa.get_libraries() + ['arrow_cuda'],\n language='c++', extra_compile_args=['-std=c++14'])]\nsetup(name='cudf', version=versioneer.get_version(), description=\n 'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=\n 'NVIDIA Corporation', license='Apache 2.0', classifiers=[\n 'Intended Audience :: Developers', 'Topic :: Database',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'], setup_requires=['cython',\n 'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,\n compiler_directives=dict(profile=False, language_level=3,\n embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'\n ]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [\n 
'*.pxd']), cmdclass=cmdclass, install_requires=install_requires,\n zip_safe=False)\n",
"step-5": "# Copyright (c) 2018-2020, NVIDIA CORPORATION.\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport sysconfig\nfrom distutils.spawn import find_executable\nfrom distutils.sysconfig import get_python_lib\n\nimport numpy as np\nimport pyarrow as pa\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\nfrom setuptools import find_packages, setup\nfrom setuptools.extension import Extension\n\nimport versioneer\n\ninstall_requires = [\"numba\", \"cython\"]\n\ncython_files = [\"cudf/**/*.pyx\"]\n\nCUDA_HOME = os.environ.get(\"CUDA_HOME\", False)\nif not CUDA_HOME:\n path_to_cuda_gdb = shutil.which(\"cuda-gdb\")\n if path_to_cuda_gdb is None:\n raise OSError(\n \"Could not locate CUDA. \"\n \"Please set the environment variable \"\n \"CUDA_HOME to the path to the CUDA installation \"\n \"and try again.\"\n )\n CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))\n\nif not os.path.isdir(CUDA_HOME):\n raise OSError(f\"Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}\")\n\ncuda_include_dir = os.path.join(CUDA_HOME, \"include\")\n\nCUDF_ROOT = os.environ.get(\"CUDF_ROOT\", \"../../cpp/build/\")\n\ntry:\n nthreads = int(os.environ.get(\"PARALLEL_LEVEL\", \"0\") or \"0\")\nexcept Exception:\n nthreads = 0\n\ncmdclass = versioneer.get_cmdclass()\n\n\nclass build_ext_and_proto(build_ext):\n def run(self):\n # Get protoc\n protoc = None\n if \"PROTOC\" in os.environ and os.path.exists(os.environ[\"PROTOC\"]):\n protoc = os.environ[\"PROTOC\"]\n else:\n protoc = find_executable(\"protoc\")\n if protoc is None:\n sys.stderr.write(\"protoc not found\")\n sys.exit(1)\n\n # Build .proto file\n for source in [\"cudf/utils/metadata/orc_column_statistics.proto\"]:\n output = source.replace(\".proto\", \"_pb2.py\")\n\n if not os.path.exists(output) or (\n os.path.getmtime(source) > os.path.getmtime(output)\n ):\n with open(output, \"a\") as src:\n src.write(\"# flake8: noqa\" + os.linesep)\n src.write(\"# fmt: off\" + os.linesep)\n subprocess.check_call([protoc, \"--python_out=.\", source])\n with open(output, \"r+\") as src:\n new_src_content = (\n \"# flake8: noqa\"\n + os.linesep\n + \"# fmt: off\"\n + os.linesep\n + src.read()\n + \"# fmt: on\"\n + os.linesep\n )\n src.seek(0)\n src.write(new_src_content)\n\n # Run original Cython build_ext command\n build_ext.run(self)\n\n\ncmdclass[\"build_ext\"] = build_ext_and_proto\n\nextensions = [\n Extension(\n \"*\",\n sources=cython_files,\n include_dirs=[\n \"../../cpp/include/cudf\",\n \"../../cpp/include\",\n os.path.join(CUDF_ROOT, \"include\"),\n os.path.join(CUDF_ROOT, \"_deps/libcudacxx-src/include\"),\n os.path.join(\n os.path.dirname(sysconfig.get_path(\"include\")),\n \"libcudf/libcudacxx\",\n ),\n os.path.dirname(sysconfig.get_path(\"include\")),\n np.get_include(),\n pa.get_include(),\n cuda_include_dir,\n ],\n library_dirs=(\n pa.get_library_dirs()\n + [get_python_lib(), os.path.join(os.sys.prefix, \"lib\")]\n ),\n libraries=[\"cudf\"] + pa.get_libraries() + [\"arrow_cuda\"],\n language=\"c++\",\n extra_compile_args=[\"-std=c++14\"],\n )\n]\n\nsetup(\n name=\"cudf\",\n version=versioneer.get_version(),\n description=\"cuDF - GPU Dataframe\",\n url=\"https://github.com/rapidsai/cudf\",\n author=\"NVIDIA Corporation\",\n license=\"Apache 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Topic :: Database\",\n \"Topic :: Scientific/Engineering\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 
3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n # Include the separately-compiled shared library\n setup_requires=[\"cython\", \"protobuf\"],\n ext_modules=cythonize(\n extensions,\n nthreads=nthreads,\n compiler_directives=dict(\n profile=False, language_level=3, embedsignature=True\n ),\n ),\n packages=find_packages(include=[\"cudf\", \"cudf.*\"]),\n package_data=dict.fromkeys(\n find_packages(include=[\"cudf._lib*\"]), [\"*.pxd\"],\n ),\n cmdclass=cmdclass,\n install_requires=install_requires,\n zip_safe=False,\n)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class DatabaseConnection:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def connect(self):
self.conn = MySQLdb.connect(host=self.address, port=3306, user=self
.user, passwd=self.password, db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute(
'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',
(thwart(email), thwart(number)))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DatabaseConnection:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def connect(self):
self.conn = MySQLdb.connect(host=self.address, port=3306, user=self
.user, passwd=self.password, db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute(
'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',
(thwart(email), thwart(number)))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
<|reserved_special_token_0|>
def getMostClicked(self):
try:
c, conn = self.connect()
c.execute(
'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'
)
data = c.fetchone()
print(data)
self.disconnect()
return [data[0], data[1], data[2]]
except:
return []
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DatabaseConnection:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def connect(self):
self.conn = MySQLdb.connect(host=self.address, port=3306, user=self
.user, passwd=self.password, db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute(
'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',
(thwart(email), thwart(number)))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def updateSpamTable(self, mailID, repo):
try:
c, conn = self.connect()
no = c.execute('SELECT * FROM spammail WHERE idEmail = %s', (
thwart(mailID),))
print(no)
if no == 0:
c.execute(
'INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)'
, (1, thwart(repo), thwart(mailID)))
else:
c.execute('SELECT numClicked FROM spammail WHERE idEmail = %s',
(thwart(mailID),))
no = c.fetchone()[0]
print(no)
c.execute(
'UPDATE spammail SET numClicked = %s WHERE idEmail = %s',
(no + 1, thwart(mailID)))
conn.commit()
self.disconnect()
print('here')
return True
except:
return False
def getMostClicked(self):
try:
c, conn = self.connect()
c.execute(
'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'
)
data = c.fetchone()
print(data)
self.disconnect()
return [data[0], data[1], data[2]]
except:
return []
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DatabaseConnection:
def __init__(self, address, user, password, database):
self.address = address
self.user = user
self.password = password
self.database = database
<|reserved_special_token_0|>
def connect(self):
self.conn = MySQLdb.connect(host=self.address, port=3306, user=self
.user, passwd=self.password, db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute(
'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',
(thwart(email), thwart(number)))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def updateSpamTable(self, mailID, repo):
try:
c, conn = self.connect()
no = c.execute('SELECT * FROM spammail WHERE idEmail = %s', (
thwart(mailID),))
print(no)
if no == 0:
c.execute(
'INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)'
, (1, thwart(repo), thwart(mailID)))
else:
c.execute('SELECT numClicked FROM spammail WHERE idEmail = %s',
(thwart(mailID),))
no = c.fetchone()[0]
print(no)
c.execute(
'UPDATE spammail SET numClicked = %s WHERE idEmail = %s',
(no + 1, thwart(mailID)))
conn.commit()
self.disconnect()
print('here')
return True
except:
return False
def getMostClicked(self):
try:
c, conn = self.connect()
c.execute(
'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'
)
data = c.fetchone()
print(data)
self.disconnect()
return [data[0], data[1], data[2]]
except:
return []
<|reserved_special_token_1|>
import MySQLdb
from MySQLdb import escape_string as thwart

"""
"""
class DatabaseConnection:

    def __init__(self, address, user, password, database):
        self.address = address
        self.user = user
        self.password = password
        self.database = database

    """
    """
    def connect(self):
        self.conn = MySQLdb.connect(host=self.address,
                                    port=3306,
                                    user=self.user,
                                    passwd=self.password,
                                    db=self.database)

        c = self.conn.cursor()
        return c, self.conn

    def disconnect(self):
        self.conn.close()

    def addEmail(self, email, number):
        try:
            c, conn = self.connect()
            c.execute("INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)", (thwart(email), thwart(number),))
            conn.commit()
            self.disconnect()
            return True
        except Exception:
            return False

    def removeEmail(self, email):
        try:
            c, conn = self.connect()
            c.execute("DELETE from User WHERE email = (%s)", (thwart(email),))
            conn.commit()
            self.disconnect()
            return True
        except Exception:
            return False

    def updateSpamTable(self, mailID, repo):
        try:
            c, conn = self.connect()
            no = c.execute("SELECT * FROM spammail WHERE idEmail = %s", (thwart(mailID),))
            print(no)
            if no == 0:
                # first click for this mail: insert a fresh row
                c.execute("INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)", (1, thwart(repo), thwart(mailID),))
            else:
                # subsequent click: bump the stored counter
                c.execute("SELECT numClicked FROM spammail WHERE idEmail = %s", (thwart(mailID),))
                no = c.fetchone()[0]
                print(no)
                c.execute("UPDATE spammail SET numClicked = %s WHERE idEmail = %s", (no+1, thwart(mailID),))

            conn.commit()
            self.disconnect()
            print("here")
            return True
        except Exception:
            return False

    def getMostClicked(self):
        try:
            c, conn = self.connect()
            c.execute("SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1")
            data = c.fetchone()
            print(data)
            self.disconnect()
            return [data[0], data[1], data[2]]
        except Exception:
            return []
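A minimal usage sketch for the class above; the host, credentials, and sample values are made up, and the User/SpamMail tables are assumed to exist with the columns the queries reference:

# a minimal sketch with illustrative connection details
db = DatabaseConnection("127.0.0.1", "mailer", "secret", "maildb")
if db.addEmail("alice@example.com", "100"):  # both values pass through escape_string
    db.updateSpamTable("42", "example-repo")  # first click inserts the row
    print(db.getMostClicked())                # -> [idEmail, repo, numClicked]
    db.removeEmail("alice@example.com")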
|
flexible
|
{
"blob_id": "c6502d6b589fa75dfbd5946a1097e77fc0b472c4",
"index": 1126,
"step-1": "<mask token>\n\n\nclass DatabaseConnection:\n <mask token>\n <mask token>\n\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address, port=3306, user=self\n .user, passwd=self.password, db=self.database)\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\n 'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',\n (thwart(email), thwart(number)))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DatabaseConnection:\n <mask token>\n <mask token>\n\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address, port=3306, user=self\n .user, passwd=self.password, db=self.database)\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\n 'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',\n (thwart(email), thwart(number)))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n <mask token>\n\n def getMostClicked(self):\n try:\n c, conn = self.connect()\n c.execute(\n 'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'\n )\n data = c.fetchone()\n print(data)\n self.disconnect()\n return [data[0], data[1], data[2]]\n except:\n return []\n",
"step-3": "<mask token>\n\n\nclass DatabaseConnection:\n <mask token>\n <mask token>\n\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address, port=3306, user=self\n .user, passwd=self.password, db=self.database)\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\n 'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',\n (thwart(email), thwart(number)))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def updateSpamTable(self, mailID, repo):\n try:\n c, conn = self.connect()\n no = c.execute('SELECT * FROM spammail WHERE idEmail = %s', (\n thwart(mailID),))\n print(no)\n if no == 0:\n c.execute(\n 'INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)'\n , (1, thwart(repo), thwart(mailID)))\n else:\n c.execute('SELECT numClicked FROM spammail WHERE idEmail = %s',\n (thwart(mailID),))\n no = c.fetchone()[0]\n print(no)\n c.execute(\n 'UPDATE spammail SET numClicked = %s WHERE idEmail = %s',\n (no + 1, thwart(mailID)))\n conn.commit()\n self.disconnect()\n print('here')\n return True\n except:\n return False\n\n def getMostClicked(self):\n try:\n c, conn = self.connect()\n c.execute(\n 'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'\n )\n data = c.fetchone()\n print(data)\n self.disconnect()\n return [data[0], data[1], data[2]]\n except:\n return []\n",
"step-4": "<mask token>\n\n\nclass DatabaseConnection:\n\n def __init__(self, address, user, password, database):\n self.address = address\n self.user = user\n self.password = password\n self.database = database\n <mask token>\n\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address, port=3306, user=self\n .user, passwd=self.password, db=self.database)\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\n 'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',\n (thwart(email), thwart(number)))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def updateSpamTable(self, mailID, repo):\n try:\n c, conn = self.connect()\n no = c.execute('SELECT * FROM spammail WHERE idEmail = %s', (\n thwart(mailID),))\n print(no)\n if no == 0:\n c.execute(\n 'INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)'\n , (1, thwart(repo), thwart(mailID)))\n else:\n c.execute('SELECT numClicked FROM spammail WHERE idEmail = %s',\n (thwart(mailID),))\n no = c.fetchone()[0]\n print(no)\n c.execute(\n 'UPDATE spammail SET numClicked = %s WHERE idEmail = %s',\n (no + 1, thwart(mailID)))\n conn.commit()\n self.disconnect()\n print('here')\n return True\n except:\n return False\n\n def getMostClicked(self):\n try:\n c, conn = self.connect()\n c.execute(\n 'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'\n )\n data = c.fetchone()\n print(data)\n self.disconnect()\n return [data[0], data[1], data[2]]\n except:\n return []\n",
"step-5": "import MySQLdb\nfrom MySQLdb import escape_string as thwart\n\"\"\"\n\"\"\"\nclass DatabaseConnection:\n\n def __init__(self, address, user, password, database):\n self.address = address\n self.user = user\n self.password = password\n self.database = database\n\n \"\"\"\n \n \"\"\"\n def connect(self):\n self.conn = MySQLdb.connect(host=self.address,\n port=3306,\n user=self.user,\n passwd=self.password,\n db=self.database)\n\n c = self.conn.cursor()\n return c, self.conn\n\n def disconnect(self):\n self.conn.close()\n\n\n def addEmail(self, email, number):\n try:\n c, conn = self.connect()\n c.execute(\"INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)\", (thwart(email), thwart(number),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n def removeEmail(self, email):\n try:\n c, conn = self.connect()\n c.execute(\"DELETE from User WHERE email = (%s)\", (thwart(email),))\n conn.commit()\n self.disconnect()\n return True\n except Exception:\n return False\n\n\n def updateSpamTable(self, mailID, repo):\n try:\n c, conn = self.connect()\n no = c.execute(\"SELECT * FROM spammail WHERE idEmail = %s\", (thwart(mailID),))\n print(no)\n if no == 0:\n c.execute(\"INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)\", (1, thwart(repo), thwart(mailID),))\n else:\n c.execute(\"SELECT numClicked FROM spammail WHERE idEmail = %s\", (thwart(mailID),))\n no = c.fetchone()[0]\n print(no)\n c.execute(\"UPDATE spammail SET numClicked = %s WHERE idEmail = %s\", (no+1, thwart(mailID),))\n\n conn.commit()\n self.disconnect()\n print(\"here\")\n return True\n except:\n return False\n\n def getMostClicked(self):\n try:\n c, conn = self.connect()\n c.execute(\"SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1\")\n data = c.fetchone()\n print(data)\n self.disconnect()\n return [data[0], data[1], data[2]]\n except:\n return []\n\n",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
inp = int(input())
print(bytes(inp))
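Note that on Python 3, bytes(n) with an integer argument allocates n zero bytes rather than rendering the digits, so the snippet above prints a NUL-filled buffer. A quick comparison:

# Python 3 semantics of bytes(int)
print(bytes(3))          # b'\x00\x00\x00'
print(str(3).encode())   # b'3' -- likely what a direct Python 2 port intended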
|
normal
|
{
"blob_id": "63a2c8b0c2eba2d5f9f82352196ef2b67d4d63b5",
"index": 3838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(bytes(inp))\n",
"step-3": "inp = int(input())\nprint(bytes(inp))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class RtlConverter(object):
def __init__(self, filelist, topmodule='userlogic', include=None,
define=None, single_clock=False):
self.filelist = filelist
self.topmodule = topmodule
self.include = include
self.define = define
self.single_clock = single_clock
self.top_parameters = collections.OrderedDict()
self.top_ioports = collections.OrderedDict()
self.coram_object = collections.OrderedDict()
def getTopParameters(self):
return self.top_parameters
def getTopIOPorts(self):
return self.top_ioports
def getCoramObject(self):
return self.coram_object
def dumpCoramObject(self):
coram_object = self.getCoramObject()
print('----------------------------------------')
print('CoRAM Objects in User-defined RTL')
for mode, coram_items in coram_object.items():
print(' CoRAM %s' % mode)
for threadname, idx, subid, addrwidth, datawidth in sorted(
coram_items, key=lambda x: x[1]):
print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %
(mode, idx, '' if subid is None else ''.join(('[', str(
subid), ']')), threadname, str(addrwidth), str(datawidth)))
def generate(self):
preprocess_define = []
if self.single_clock:
preprocess_define.append('CORAM_SINGLE_CLOCK')
if self.define:
preprocess_define.extend(self.define)
code_parser = VerilogCodeParser(self.filelist, preprocess_include=
self.include, preprocess_define=preprocess_define)
ast = code_parser.parse()
module_visitor = ModuleVisitor()
module_visitor.visit(ast)
modulenames = module_visitor.get_modulenames()
moduleinfotable = module_visitor.get_moduleinfotable()
instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,
self.topmodule)
instanceconvert_visitor.start_visit()
replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()
replaced_instports = instanceconvert_visitor.getReplacedInstPorts()
replaced_items = instanceconvert_visitor.getReplacedItems()
new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()
instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,
replaced_instports, replaced_items, new_moduleinfotable)
ret = instancereplace_visitor.getAST()
frametable = instanceconvert_visitor.getFrameTable()
top_ioports = []
for i in moduleinfotable.getIOPorts(self.topmodule):
if signaltype.isClock(i) or signaltype.isReset(i):
continue
top_ioports.append(i)
top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])
top_sigs = frametable.getSignals(top_scope)
top_params = frametable.getConsts(top_scope)
for sk, sv in top_sigs.items():
if len(sk) > 2:
continue
signame = sk[1].scopename
for svv in sv:
if signame in top_ioports and not (signaltype.isClock(
signame) or signaltype.isReset(signame)) and isinstance(svv
, vast.Input) or isinstance(svv, vast.Output
) or isinstance(svv, vast.Inout):
port = svv
msb_val = instanceconvert_visitor.optimize(
instanceconvert_visitor.getTree(port.width.msb,
top_scope))
lsb_val = instanceconvert_visitor.optimize(
instanceconvert_visitor.getTree(port.width.lsb,
top_scope))
width = int(msb_val.value) - int(lsb_val.value) + 1
self.top_ioports[signame] = port, width
break
for ck, cv in top_params.items():
if len(ck) > 2:
continue
signame = ck[1].scopename
param = cv[0]
if isinstance(param, vast.Genvar):
continue
self.top_parameters[signame] = param
self.coram_object = instanceconvert_visitor.getCoramObject()
return ret
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RtlConverter(object):
def __init__(self, filelist, topmodule='userlogic', include=None,
define=None, single_clock=False):
self.filelist = filelist
self.topmodule = topmodule
self.include = include
self.define = define
self.single_clock = single_clock
self.top_parameters = collections.OrderedDict()
self.top_ioports = collections.OrderedDict()
self.coram_object = collections.OrderedDict()
def getTopParameters(self):
return self.top_parameters
def getTopIOPorts(self):
return self.top_ioports
def getCoramObject(self):
return self.coram_object
def dumpCoramObject(self):
coram_object = self.getCoramObject()
print('----------------------------------------')
print('CoRAM Objects in User-defined RTL')
for mode, coram_items in coram_object.items():
print(' CoRAM %s' % mode)
for threadname, idx, subid, addrwidth, datawidth in sorted(
coram_items, key=lambda x: x[1]):
print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %
(mode, idx, '' if subid is None else ''.join(('[', str(
subid), ']')), threadname, str(addrwidth), str(datawidth)))
def generate(self):
preprocess_define = []
if self.single_clock:
preprocess_define.append('CORAM_SINGLE_CLOCK')
if self.define:
preprocess_define.extend(self.define)
code_parser = VerilogCodeParser(self.filelist, preprocess_include=
self.include, preprocess_define=preprocess_define)
ast = code_parser.parse()
module_visitor = ModuleVisitor()
module_visitor.visit(ast)
modulenames = module_visitor.get_modulenames()
moduleinfotable = module_visitor.get_moduleinfotable()
instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,
self.topmodule)
instanceconvert_visitor.start_visit()
replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()
replaced_instports = instanceconvert_visitor.getReplacedInstPorts()
replaced_items = instanceconvert_visitor.getReplacedItems()
new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()
instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,
replaced_instports, replaced_items, new_moduleinfotable)
ret = instancereplace_visitor.getAST()
frametable = instanceconvert_visitor.getFrameTable()
top_ioports = []
for i in moduleinfotable.getIOPorts(self.topmodule):
if signaltype.isClock(i) or signaltype.isReset(i):
continue
top_ioports.append(i)
top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])
top_sigs = frametable.getSignals(top_scope)
top_params = frametable.getConsts(top_scope)
for sk, sv in top_sigs.items():
if len(sk) > 2:
continue
signame = sk[1].scopename
for svv in sv:
if signame in top_ioports and not (signaltype.isClock(
signame) or signaltype.isReset(signame)) and isinstance(svv
, vast.Input) or isinstance(svv, vast.Output
) or isinstance(svv, vast.Inout):
port = svv
msb_val = instanceconvert_visitor.optimize(
instanceconvert_visitor.getTree(port.width.msb,
top_scope))
lsb_val = instanceconvert_visitor.optimize(
instanceconvert_visitor.getTree(port.width.lsb,
top_scope))
width = int(msb_val.value) - int(lsb_val.value) + 1
self.top_ioports[signame] = port, width
break
for ck, cv in top_params.items():
if len(ck) > 2:
continue
signame = ck[1].scopename
param = cv[0]
if isinstance(param, vast.Genvar):
continue
self.top_parameters[signame] = param
self.coram_object = instanceconvert_visitor.getCoramObject()
return ret
def main():
from optparse import OptionParser
INFO = 'PyCoRAM RTL Converter'
VERSION = utils.version.VERSION
USAGE = 'Usage: python rtlconverter.py -t TOPMODULE file ...'
def showVersion():
print(INFO)
print(VERSION)
print(USAGE)
sys.exit()
optparser = OptionParser()
optparser.add_option('-v', '--version', action='store_true', dest=
'showversion', default=False, help='Show the version')
optparser.add_option('-t', '--top', dest='topmodule', default=
'userlogic', help='Top module, Default=userlogic')
optparser.add_option('-o', '--output', dest='outputfile', default=
'out.v', help='Output file name, Default=out.v')
optparser.add_option('-I', '--include', dest='include', action='append',
default=[], help='Include path')
optparser.add_option('-D', dest='define', action='append', default=[],
help='Macro Definition')
optparser.add_option('--singleclock', action='store_true', dest=
'single_clock', default=False, help='Use single clock mode')
options, args = optparser.parse_args()
filelist = args
if options.showversion:
showVersion()
for f in filelist:
if not os.path.exists(f):
raise IOError('file not found: ' + f)
if len(filelist) == 0:
showVersion()
converter = RtlConverter(filelist, options.topmodule, include=options.
include, define=options.define, single_clock=options.single_clock)
ast = converter.generate()
converter.dumpCoramObject()
asttocode = ASTCodeGenerator()
code = asttocode.visit(ast)
f = open(options.outputfile, 'w')
f.write(code)
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
<|reserved_special_token_0|>
if sys.version_info[0] >= 3:
from rtlconverter.convertvisitor import InstanceConvertVisitor
from rtlconverter.convertvisitor import InstanceReplaceVisitor
else:
from convertvisitor import InstanceConvertVisitor
from convertvisitor import InstanceReplaceVisitor
<|reserved_special_token_0|>
class RtlConverter(object):
def __init__(self, filelist, topmodule='userlogic', include=None,
define=None, single_clock=False):
self.filelist = filelist
self.topmodule = topmodule
self.include = include
self.define = define
self.single_clock = single_clock
self.top_parameters = collections.OrderedDict()
self.top_ioports = collections.OrderedDict()
self.coram_object = collections.OrderedDict()
def getTopParameters(self):
return self.top_parameters
def getTopIOPorts(self):
return self.top_ioports
def getCoramObject(self):
return self.coram_object
def dumpCoramObject(self):
coram_object = self.getCoramObject()
print('----------------------------------------')
print('CoRAM Objects in User-defined RTL')
for mode, coram_items in coram_object.items():
print(' CoRAM %s' % mode)
for threadname, idx, subid, addrwidth, datawidth in sorted(
coram_items, key=lambda x: x[1]):
print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %
(mode, idx, '' if subid is None else ''.join(('[', str(
subid), ']')), threadname, str(addrwidth), str(datawidth)))
def generate(self):
preprocess_define = []
if self.single_clock:
preprocess_define.append('CORAM_SINGLE_CLOCK')
if self.define:
preprocess_define.extend(self.define)
code_parser = VerilogCodeParser(self.filelist, preprocess_include=
self.include, preprocess_define=preprocess_define)
ast = code_parser.parse()
module_visitor = ModuleVisitor()
module_visitor.visit(ast)
modulenames = module_visitor.get_modulenames()
moduleinfotable = module_visitor.get_moduleinfotable()
instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,
self.topmodule)
instanceconvert_visitor.start_visit()
replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()
replaced_instports = instanceconvert_visitor.getReplacedInstPorts()
replaced_items = instanceconvert_visitor.getReplacedItems()
new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()
instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,
replaced_instports, replaced_items, new_moduleinfotable)
ret = instancereplace_visitor.getAST()
frametable = instanceconvert_visitor.getFrameTable()
top_ioports = []
for i in moduleinfotable.getIOPorts(self.topmodule):
if signaltype.isClock(i) or signaltype.isReset(i):
continue
top_ioports.append(i)
top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])
top_sigs = frametable.getSignals(top_scope)
top_params = frametable.getConsts(top_scope)
for sk, sv in top_sigs.items():
if len(sk) > 2:
continue
signame = sk[1].scopename
for svv in sv:
if signame in top_ioports and not (signaltype.isClock(
signame) or signaltype.isReset(signame)) and isinstance(svv
, vast.Input) or isinstance(svv, vast.Output
) or isinstance(svv, vast.Inout):
port = svv
msb_val = instanceconvert_visitor.optimize(
instanceconvert_visitor.getTree(port.width.msb,
top_scope))
lsb_val = instanceconvert_visitor.optimize(
instanceconvert_visitor.getTree(port.width.lsb,
top_scope))
width = int(msb_val.value) - int(lsb_val.value) + 1
self.top_ioports[signame] = port, width
break
for ck, cv in top_params.items():
if len(ck) > 2:
continue
signame = ck[1].scopename
param = cv[0]
if isinstance(param, vast.Genvar):
continue
self.top_parameters[signame] = param
self.coram_object = instanceconvert_visitor.getCoramObject()
return ret
def main():
from optparse import OptionParser
INFO = 'PyCoRAM RTL Converter'
VERSION = utils.version.VERSION
USAGE = 'Usage: python rtlconverter.py -t TOPMODULE file ...'
def showVersion():
print(INFO)
print(VERSION)
print(USAGE)
sys.exit()
optparser = OptionParser()
optparser.add_option('-v', '--version', action='store_true', dest=
'showversion', default=False, help='Show the version')
optparser.add_option('-t', '--top', dest='topmodule', default=
'userlogic', help='Top module, Default=userlogic')
optparser.add_option('-o', '--output', dest='outputfile', default=
'out.v', help='Output file name, Default=out.v')
optparser.add_option('-I', '--include', dest='include', action='append',
default=[], help='Include path')
optparser.add_option('-D', dest='define', action='append', default=[],
help='Macro Definition')
optparser.add_option('--singleclock', action='store_true', dest=
'single_clock', default=False, help='Use single clock mode')
options, args = optparser.parse_args()
filelist = args
if options.showversion:
showVersion()
for f in filelist:
if not os.path.exists(f):
raise IOError('file not found: ' + f)
if len(filelist) == 0:
showVersion()
converter = RtlConverter(filelist, options.topmodule, include=options.
include, define=options.define, single_clock=options.single_clock)
ast = converter.generate()
converter.dumpCoramObject()
asttocode = ASTCodeGenerator()
code = asttocode.visit(ast)
f = open(options.outputfile, 'w')
f.write(code)
f.close()
if __name__ == '__main__':
main()
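A minimal sketch of using the converter programmatically, mirroring what main() above does; the input file name, include path, and top-module name are illustrative assumptions:

# a minimal sketch, assuming user.v defines the top module `userlogic`
converter = RtlConverter(['user.v'], topmodule='userlogic',
                         include=['include/'], define=[],
                         single_clock=True)
ast = converter.generate()
converter.dumpCoramObject()           # report CoRAM objects found in the RTL
code = ASTCodeGenerator().visit(ast)  # the same code generator main() uses
with open('out.v', 'w') as f:
    f.write(code)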

# (The code above is rtlconverter.py from PyCoRAM; Copyright (C) 2013,
#  Shinya Takamaeda-Yamazaki; License: Apache 2.0.)


#-------------------------------------------------------------------------------
# Django views for the lessons app
#-------------------------------------------------------------------------------
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from lessons.models import Lesson, Question, Response
from usermanage.models import SchoolClass
import json


@login_required
def lessons_overview(request):
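    # A staff POST updates a class password; otherwise just list the
    # viewer's classes (taught for staff, enrolled for students).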
if request.method == 'POST':
if request.user.is_staff:
school_class = SchoolClass.objects.get(id=request.POST['class_id'])
school_class.password = request.POST['class_pwd']
school_class.save()
if request.user.is_staff:
classes = request.user.teachers.select_related()
else:
classes = request.user.students.select_related()
return render(request, 'lessons_overview.html', {
'classes': classes,
})


@login_required
def lesson(request, id):
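    # An optional ?grade_class=<id> query passes the class through to the
    # template, switching it into per-class grading mode.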
lesson = Lesson.objects.get(id=id)
if request.GET.get('grade_class'):
school_class = SchoolClass.objects.get(id=request.GET['grade_class'])
else:
school_class = None
return render(request, 'lesson.html', {
'lesson': lesson,
'school_class': school_class,
})


@staff_member_required
def new_lesson(request, id):
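    # GET renders the creation form; POST creates the lesson and one
    # Question per submitted title, then returns to the overview.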
school_class = SchoolClass.objects.get(id=id)
if request.method == 'POST':
lesson = Lesson(
name=request.POST['lesson_name'],
school_class=school_class,
)
for title in request.POST.getlist('questions[]'):
question = Question(title=title)
question.save()
lesson.questions.add(question)
lesson.save()
return redirect('/')
return render(request, 'new_lesson.html', {
'school_class': school_class,
})


@staff_member_required
def grade_question(request, class_id, id):
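    # Collect every response to this question from the class roster, plus
    # the names of students who have not answered yet.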
question = Question.objects.get(id=id)
school_class = SchoolClass.objects.get(id=class_id)
students = school_class.students.all()
responses = Response.objects.filter(
answerer__in=students,
question=question
)
unanswered_students = []
for student in students:
try:
Response.objects.get(answerer=student, question=question)
except Response.DoesNotExist:
unanswered_students.append(student.get_full_name())
unanswered_students = ', '.join(unanswered_students) if unanswered_students else None
return render(request, 'question.html', {
'question': question,
'responses': responses,
'unanswered_students': unanswered_students,
})


def update_questions(questions, lesson_id):
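    # Assumes the browser posts at least as many titles as the lesson already
    # has questions (each existing question consumes one title); surplus
    # titles become new Question rows. Blank titles are dropped up front.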
questions = [q for q in questions if len(q) > 0]
lesson = Lesson.objects.get(id=lesson_id)
for question in lesson.questions.all():
question.title = questions.pop(0)
question.save()
if len(questions) > 0:
for title in questions:
new_question = Question(title=title)
new_question.save()
lesson.questions.add(new_question)
lesson.save()


@staff_member_required
def edit_lesson(request, id):
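    # POST with action=update rewrites the question list; action=delete
    # removes a single question. GET renders the edit form.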
if request.method == 'POST':
if request.POST['action'] == 'update':
update_questions(request.POST.getlist('questions[]'), id)
return HttpResponse(status=200)
elif request.POST['action'] == 'delete':
Question.objects.get(id=request.POST['id']).delete()
return HttpResponse(status=200)
elif request.method == 'GET':
lesson = Lesson.objects.get(id=id)
return render(request, 'edit_lesson.html', {
'lesson': lesson,
})


@staff_member_required
def mark_response_seen(request):
response = Response.objects.get(id=request.POST['id'])
response.seen = True
response.save()
return HttpResponse(status=200)


@staff_member_required
def save_comment(request):
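    # Each POST key is a Response id and its value the teacher's comment;
    # commenting also marks the response as seen.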
for id in request.POST.keys():
response = Response.objects.get(id=id)
response.seen = True # redundant
response.comment = request.POST[id]
response.save()
return HttpResponse(status=200)


@login_required
def save_responses(request):
    lesson = Lesson.objects.get(id=request.POST['lesson'])
    # Every POST field except the lesson id is a response. Materialize the
    # items as a list so this also works on Python 3, where dict views do
    # not support pop()/index().
    responses = [item for item in request.POST.items() if item[0] != 'lesson']
    new_response_ids = {}

    for key, text in responses:
        try:
            # An existing response posts under its numeric Response id.
            response = Response.objects.get(id=key, answerer=request.user)
            response.text = text
            response.save()
        except ValueError:
            # A non-numeric key marks a new response: the first four
            # characters are a client-side prefix, the rest is the
            # question id.
            if len(text) > 0:
                response = Response(
                    text=text,
                    answerer=request.user,
                    question=Question.objects.get(id=key[4:]),
                    lesson=lesson,
                )
                response.save()
                new_response_ids[key] = str(response.id)

    return HttpResponse(json.dumps(new_response_ids),
                        content_type='application/json')
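

# A URLconf sketch (an assumption, not taken from this dump): one plausible
# way to route the views above, kept commented out so it does not alter the
# module. Pattern shapes and prefixes are illustrative only.
#
#   from django.conf.urls import url
#   from lessons import views
#
#   urlpatterns = [
#       url(r'^$', views.lessons_overview),
#       url(r'^lesson/(?P<id>\d+)/$', views.lesson),
#       url(r'^class/(?P<id>\d+)/new/$', views.new_lesson),
#       url(r'^class/(?P<class_id>\d+)/question/(?P<id>\d+)/$',
#           views.grade_question),
#       url(r'^lesson/(?P<id>\d+)/edit/$', views.edit_lesson),
#       url(r'^response/seen/$', views.mark_response_seen),
#       url(r'^response/comment/$', views.save_comment),
#       url(r'^response/save/$', views.save_responses),
#   ]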
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
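A note on the save_responses view in the lessons record above: existing answers are looked up by primary key, and a ValueError (the key is not an integer id) falls through to creating a new Response whose question id is recovered by stripping the first four characters of the POST key (id[0][4:]). The record never shows the client-side prefix, so the sketch below assumes a hypothetical 'new-' prefix purely to illustrate the slicing:

# Minimal sketch of the POST-key convention implied by save_responses;
# the 'new-' prefix is an assumption -- only the 4-character slice is
# visible in the record itself.
posted = {'lesson': '7', '15': 'edited answer', 'new-42': 'fresh answer'}
for key, text in posted.items():
    if key == 'lesson':
        continue  # the lesson id is popped before the loop in the view
    try:
        int(key)  # an existing Response: the key is its primary key
        print('update Response', key, 'with:', text)
    except ValueError:
        print('create Response for Question', key[4:], 'with:', text)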
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train_test_split(X, y, test_ratio=0.2, seed=None):
"""将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test"""
assert X.shape[0] == y.shape[0
], 'the size of X must be equal to the size of y'
assert 0.0 <= test_ratio <= 1.0, 'test_ratio must be valid'
if seed:
np.random.seed(seed)
shuffle_indexes = np.random.permutation(len(X))
test_size = int(len(X) * test_ratio)
test_indexes = shuffle_indexes[:test_size]
train_indexes = shuffle_indexes[test_size:]
X_train = X[train_indexes]
y_train = y[train_indexes]
X_test = X[test_indexes]
y_test = y[test_indexes]
return X_train, X_test, y_train, y_test
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
def train_test_split(X, y, test_ratio=0.2, seed=None):
"""将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test"""
assert X.shape[0] == y.shape[0
], 'the size of X must be equal to the size of y'
assert 0.0 <= test_ratio <= 1.0, 'test_ratio must be valid'
if seed:
np.random.seed(seed)
shuffle_indexes = np.random.permutation(len(X))
test_size = int(len(X) * test_ratio)
test_indexes = shuffle_indexes[:test_size]
train_indexes = shuffle_indexes[test_size:]
X_train = X[train_indexes]
y_train = y[train_indexes]
X_test = X[test_indexes]
y_test = y[test_indexes]
return X_train, X_test, y_train, y_test
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 15:19:49 2018
@author: haoyu
"""
import numpy as np
def train_test_split(X, y, test_ratio = 0.2, seed = None):
    '''Split the data X and y into X_train, X_test, y_train, y_test according to test_ratio'''
assert X.shape[0] == y.shape[0], \
'the size of X must be equal to the size of y'
assert 0.0 <= test_ratio <=1.0, \
'test_ratio must be valid'
if seed:
np.random.seed(seed)
    shuffle_indexes = np.random.permutation(len(X))  # shuffle the order to obtain the indexes
test_size = int(len(X) * test_ratio)
test_indexes = shuffle_indexes[:test_size]
train_indexes = shuffle_indexes[test_size:]
X_train = X[train_indexes]
y_train = y[train_indexes]
X_test = X[test_indexes]
y_test = y[test_indexes]
return X_train, X_test, y_train, y_test
|
flexible
|
{
"blob_id": "beda3d13e3dc12f7527f5c5ba8a0eb05c2734fd9",
"index": 6133,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_test_split(X, y, test_ratio=0.2, seed=None):\n \"\"\"将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test\"\"\"\n assert X.shape[0] == y.shape[0\n ], 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <= 1.0, 'test_ratio must be valid'\n if seed:\n np.random.seed(seed)\n shuffle_indexes = np.random.permutation(len(X))\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n return X_train, X_test, y_train, y_test\n",
"step-3": "<mask token>\nimport numpy as np\n\n\ndef train_test_split(X, y, test_ratio=0.2, seed=None):\n \"\"\"将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test\"\"\"\n assert X.shape[0] == y.shape[0\n ], 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <= 1.0, 'test_ratio must be valid'\n if seed:\n np.random.seed(seed)\n shuffle_indexes = np.random.permutation(len(X))\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n return X_train, X_test, y_train, y_test\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 4 15:19:49 2018\n\n@author: haoyu\n\"\"\"\nimport numpy as np\n\ndef train_test_split(X, y, test_ratio = 0.2, seed = None):\n '''将数据X和y按照test_ratio分割成X_train,X_test,y_train,y_test'''\n assert X.shape[0] == y.shape[0], \\\n 'the size of X must be equal to the size of y'\n assert 0.0 <= test_ratio <=1.0, \\\n 'test_ratio must be valid'\n \n if seed:\n np.random.seed(seed)\n \n shuffle_indexes = np.random.permutation(len(X))#打乱顺序获得索引\n\n test_size = int(len(X) * test_ratio)\n test_indexes = shuffle_indexes[:test_size]\n train_indexes = shuffle_indexes[test_size:]\n\n X_train = X[train_indexes]\n y_train = y[train_indexes]\n\n X_test = X[test_indexes]\n y_test = y[test_indexes]\n \n return X_train, X_test, y_train, y_test",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
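A quick usage sketch for the train_test_split defined above, on toy data. One caveat worth noting: the truthiness check 'if seed:' silently ignores seed=0, so a non-zero seed is passed here ('if seed is not None:' would be stricter).

import numpy as np

# Toy dataset: 10 samples with 2 features each, labels 0..9.
X = np.arange(20).reshape(10, 2)
y = np.arange(10)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_ratio=0.2, seed=666)
print(X_train.shape, X_test.shape)  # (8, 2) (2, 2)
print(y_test)                       # two randomly chosen labels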
<|reserved_special_token_0|>
class BaseResource(Resource):
<|reserved_special_token_0|>
def __init__(self, *args, **kwargs):
super(BaseResource, self).__init__(*args, **kwargs)
self._user = None
<|reserved_special_token_0|>
@property
def current_user(self):
return current_user._get_current_object()
@property
def current_org(self):
return current_org._get_current_object()
def record_event(self, options):
options.update({'user_id': self.current_user.id, 'org_id': self.
current_org.id})
record_event.delay(options)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseResource(Resource):
<|reserved_special_token_0|>
def __init__(self, *args, **kwargs):
super(BaseResource, self).__init__(*args, **kwargs)
self._user = None
def dispatch_request(self, *args, **kwargs):
kwargs.pop('org_slug', None)
return super(BaseResource, self).dispatch_request(*args, **kwargs)
@property
def current_user(self):
return current_user._get_current_object()
@property
def current_org(self):
return current_org._get_current_object()
def record_event(self, options):
options.update({'user_id': self.current_user.id, 'org_id': self.
current_org.id})
record_event.delay(options)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseResource(Resource):
decorators = [login_required]
def __init__(self, *args, **kwargs):
super(BaseResource, self).__init__(*args, **kwargs)
self._user = None
def dispatch_request(self, *args, **kwargs):
kwargs.pop('org_slug', None)
return super(BaseResource, self).dispatch_request(*args, **kwargs)
@property
def current_user(self):
return current_user._get_current_object()
@property
def current_org(self):
return current_org._get_current_object()
def record_event(self, options):
options.update({'user_id': self.current_user.id, 'org_id': self.
current_org.id})
record_event.delay(options)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseResource(Resource):
decorators = [login_required]
def __init__(self, *args, **kwargs):
super(BaseResource, self).__init__(*args, **kwargs)
self._user = None
def dispatch_request(self, *args, **kwargs):
kwargs.pop('org_slug', None)
return super(BaseResource, self).dispatch_request(*args, **kwargs)
@property
def current_user(self):
return current_user._get_current_object()
@property
def current_org(self):
return current_org._get_current_object()
def record_event(self, options):
options.update({'user_id': self.current_user.id, 'org_id': self.
current_org.id})
record_event.delay(options)
<|reserved_special_token_0|>
def get_object_or_404(fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except DoesNotExist:
abort(404)
<|reserved_special_token_1|>
from flask.ext.restful import Resource, abort
from flask_login import current_user, login_required
from peewee import DoesNotExist
from redash.authentication.org_resolving import current_org
from redash.tasks import record_event
class BaseResource(Resource):
decorators = [login_required]
def __init__(self, *args, **kwargs):
super(BaseResource, self).__init__(*args, **kwargs)
self._user = None
def dispatch_request(self, *args, **kwargs):
kwargs.pop('org_slug', None)
return super(BaseResource, self).dispatch_request(*args, **kwargs)
@property
def current_user(self):
return current_user._get_current_object()
@property
def current_org(self):
return current_org._get_current_object()
def record_event(self, options):
options.update({
'user_id': self.current_user.id,
'org_id': self.current_org.id
})
record_event.delay(options)
def require_fields(req, fields):
for f in fields:
if f not in req:
abort(400)
def get_object_or_404(fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except DoesNotExist:
abort(404)
|
flexible
|
{
"blob_id": "71cdddfdd7c1327a8a77808dbdd0ff98d827231f",
"index": 945,
"step-1": "<mask token>\n\n\nclass BaseResource(Resource):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n <mask token>\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseResource(Resource):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<mask token>\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n",
"step-5": "from flask.ext.restful import Resource, abort\nfrom flask_login import current_user, login_required\nfrom peewee import DoesNotExist\n\nfrom redash.authentication.org_resolving import current_org\nfrom redash.tasks import record_event\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({\n 'user_id': self.current_user.id,\n 'org_id': self.current_org.id\n })\n\n record_event.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
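A minimal sketch of a concrete endpoint built on the BaseResource above, assuming the redash imports in the record resolve; the Widget model, its to_dict method, and the event fields are hypothetical:

class WidgetResource(BaseResource):
    # login_required is applied automatically through BaseResource.decorators
    def get(self, widget_id):
        # get_object_or_404 calls Widget.get(...) and turns peewee's
        # DoesNotExist into an HTTP 404
        widget = get_object_or_404(Widget.get, Widget.id == widget_id)  # hypothetical model
        self.record_event({'action': 'view', 'object_id': widget_id, 'object_type': 'widget'})
        return widget.to_dict()  # hypothetical serializer on the model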
from django.contrib import admin
# Register your models here.
from blog.models import Post,Category,Profile
admin.site.register(Profile)
admin.site.register(Category)
admin.site.register(Post)
|
normal
|
{
"blob_id": "20f0de097fdd8f2a435c06a73c6a90cc7ebc69ad",
"index": 4014,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Profile)\nadmin.site.register(Category)\nadmin.site.register(Post)\n",
"step-3": "from django.contrib import admin\nfrom blog.models import Post, Category, Profile\nadmin.site.register(Profile)\nadmin.site.register(Category)\nadmin.site.register(Post)\n",
"step-4": "from django.contrib import admin\n\n# Register your models here.\nfrom blog.models import Post,Category,Profile\n\nadmin.site.register(Profile)\nadmin.site.register(Category)\nadmin.site.register(Post)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
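For reference, the registrations in the record above can also be written with the admin.register decorator once per-model options are needed; a sketch (list_display uses only the implicit id field, since the Post model's real fields are not shown in this record):

from django.contrib import admin
from blog.models import Post


@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    list_display = ('id',)  # replace with real Post fields as needed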
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_app(settings_override={}):
app = Flask(__name__)
app.config.from_object('zezin.settings.Configuration')
app.config.update(settings_override)
db.init_app(app)
from zezin.views import partners_routes
app.register_blueprint(blueprint=partners_routes)
return app
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = SQLAlchemy()
def create_app(settings_override={}):
app = Flask(__name__)
app.config.from_object('zezin.settings.Configuration')
app.config.update(settings_override)
db.init_app(app)
from zezin.views import partners_routes
app.register_blueprint(blueprint=partners_routes)
return app
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def create_app(settings_override={}):
app = Flask(__name__)
app.config.from_object('zezin.settings.Configuration')
app.config.update(settings_override)
db.init_app(app)
from zezin.views import partners_routes
app.register_blueprint(blueprint=partners_routes)
return app
import zezin.models
<|reserved_special_token_1|>
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# pylint: disable=dangerous-default-value,wrong-import-position,unused-import, import-outside-toplevel
def create_app(settings_override={}):
app = Flask(__name__)
app.config.from_object('zezin.settings.Configuration')
app.config.update(settings_override)
db.init_app(app)
from zezin.views import partners_routes
app.register_blueprint(blueprint=partners_routes)
return app
import zezin.models # isort:skip
|
flexible
|
{
"blob_id": "6affc182f5d3353d46f6e9a21344bc85bf894165",
"index": 948,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\n<mask token>\n",
"step-3": "<mask token>\ndb = SQLAlchemy()\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\n<mask token>\n",
"step-4": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\nimport zezin.models\n",
"step-5": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\n\n# pylint: disable=dangerous-default-value,wrong-import-position,unused-import, import-outside-toplevel\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n\n db.init_app(app)\n\n from zezin.views import partners_routes\n\n app.register_blueprint(blueprint=partners_routes)\n\n return app\n\n\nimport zezin.models # isort:skip\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
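A usage sketch for the create_app factory above; the override keys are assumptions for a throwaway test configuration, since the real settings live in zezin.settings.Configuration:

test_app = create_app({
    'TESTING': True,
    'SQLALCHEMY_DATABASE_URI': 'sqlite:///:memory:',  # assumed override
})

with test_app.test_client() as client:
    response = client.get('/partners')  # hypothetical URL; partners_routes' rules are not shown here
    print(response.status_code)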
#!/usr/bin/env python
import numpy as np
import time, random
import sys, os, struct, socket
import psycopg2
import test_coords
import alex_random
import new_sim_utils
import sdr_kml_writer
from geo_utils import geo_utils
from beacon import beacon
from sim_data import data_utils
ENABLE_JITTER = False
ENABLE_DROPPED_PACKETS = False
ENABLE_LOCATION_HISTORY = True
ENABLE_BEACON_DELAY = False
class simulation:
def __init__(self):
"""__init__"""
self.geo_utils = geo_utils()
self.DEBUG = True
self.rx_number = 4
self.packet_number = 0
self.iterator = 1
self.packet_error_rate = 0.1
self.all_locations = []
def init_sim(self,n):
"""
initialize simulation for n receivers.
"""
self.beacon = beacon(ENABLE_BEACON_DELAY)
self.data = data_utils(n)
random.seed()
if n < 3:
print 'Number of receivers %i is less than three.' %n
print 'Simulation controller will not run.'
print 'Now exiting.'
sys.exit()
self.data.set_rx_number(n)
tx_loc = test_coords.get_tx_coords()
self.data.set_tx_location(tx_loc)
# self.data.reset_rx_location()
for i in range(n):
rx_loc = alex_random.get_random_coord()
if self.DEBUG:
print "\n\n\n\n\n\nstore location: ", rx_loc
print '\n\n\n\n\n\n'
self.data.set_rx_location(i,rx_loc)
tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)
self.data.set_rx_time_delay(tof)
id = i+1
self.data.set_rx_team_id(id)
if self.DEBUG:
print 'tx_loc: ', tx_loc
print 'rx_loc: ', rx_loc
print 'time: ', repr(tof)
print 'id: ', id
def rx_beacon_packet(self):
"""
        receive a single beacon packet; it is then copied n times
        to keep the receivers' clocks synchronized.
"""
self.beacon.make_packet()
rx_packet = self.beacon.tx_packet()
rx_time = np.float128('%.20f'%(time.time()))
if self.DEBUG:
print 'rx_time: ', repr(rx_time)
self.data.set_timestamp_base(rx_time)
self.data.set_beacon_packet(rx_packet)
def receiver_chain(self,h):
"""
simulate receiver chain for n repeaters
"""
self.host = h
n = self.data.get_rx_number()
beacon_packet = self.data.get_beacon_packet()
time_base = self.data.get_timestamp_base()
# lists containing data for all current teams
team_id = self.data.get_rx_team_id()
location = self.data.get_rx_location()
if ENABLE_LOCATION_HISTORY:
self.record_location_history(location)
tof = self.data.get_rx_time_delay()
if self.DEBUG:
print "\n\n\n\n\n\nretrieve location: ", location
print ''
print "type(tof): ", type(tof)
conn = psycopg2.connect(host = self.host,
user = "sdrc_user",
password = "sdrc_pass",
database = "sdrc_db")
cur = conn.cursor()
for i in range(n):
f = open('data_in.data', 'a')
(rx_pktno,) = struct.unpack('!H', beacon_packet[0:2])
(beacon_ID,) = struct.unpack('!H', beacon_packet[2:4])
# packet number
payload1 = struct.pack('!H', self.packet_number & 0xffff)
f.write(str(self.packet_number) + ';')
# team id
ident = team_id[i]
payload2 = struct.pack('!H', ident & 0xffff)
f.write(str(ident) + ';')
# location
if (self.iterator == 1):
loc = location[i]
else:
# old_loc = location[i]
# loc = alex_random.random_move(old_loc)
loc = alex_random.get_random_coord()
self.data.set_rx_location(i,loc)
f.write(str(loc)+';')
self.iterator += 1
payload3 = new_sim_utils.pack_loc(loc)
# toa
t = tof[i]
toa = time_base + t
# if (ENABLE_JITTER):
# jitter = self.random_timing_jitter()
# toa = toa+jitter
# else:
# pass
if self.DEBUG:
print "t = tof[i]: ", repr(t)
print "type(t): ", type (t)
print "toa = time_base + t: ", repr(toa)
print "type(toa): ", type(toa)
payload4 = new_sim_utils.pack_time(toa)
f.write(repr(toa)+';')
# beacon payload
payload5 = struct.pack('!H', rx_pktno & 0xffff)
f.write(str(rx_pktno) + ';')
payload6 = struct.pack('!H', beacon_ID & 0xffff)
f.write(str(beacon_ID) + '\n')
f.close()
# check if packet dropped
drop = self.drop_packet()
# this if evaluates true even if drop == False
# if (ENABLE_DROPPED_PACKETS and drop): # if drop == 'True'
# print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
# print 'drop ', drop
# print (ENABLE_DROPPED_PACKETS and drop)
# print 'packet dropped'
# payload = ''
if ENABLE_DROPPED_PACKETS:
print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
print 'drop ', drop
if drop: # if drop == 'True'
print 'drop ', drop
print 'packet dropped'
payload = ''
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
print "len(payload): ", len(payload)
cur.execute("INSERT INTO blob_table (field_1) VALUES (%s)", (psycopg2.Binary(payload),))
conn.commit()
cur.close()
conn.close()
self.packet_number += 1
def record_location_history(self,loc):
self.all_locations.append(loc)
# if self.DEBUG:
# print 'all locations:\n', self.all_locations
# def write_location_history(self):
# # f = open('location_history','w+')
# for i in self.all_locations:
# print repr(i[0][0][0]), repr(i[0][0][1]))
# # f.write(repr(i)+'\n')
# print '\n\n\n\n\n\n\n'
# print len(i)
# # f.close()
# kml_write = sdr_kml_writer.kml_writer()
# for i in range(0,len(x_results)):
# coord = str(x_results[i])+','+str(y_results[i])
# kml_write.add_placemark('','',coord)
# kml_write.write_to_file('geoloc_kml_file.kml')
def random_timing_jitter(self):
r = random.uniform(0,1)
jitter = r*1e-9
if self.DEBUG:
print 'Random timing jitter %f seconds' %(jitter)
return jitter
def drop_packet(self):
r = random.uniform(0,1)
print 'random value: ', r
print 'error rate: ', self.packet_error_rate
if (r > self.packet_error_rate):
drop = False
else:
drop = True
if self.DEBUG:
print 'Probability of dropped packet: ', self.packet_error_rate
print 'Packet dropped? ', drop
return drop
if __name__=='__main__':
from optparse import OptionParser
usage = "usage: %prog [options] arg"
parser = OptionParser(usage=usage)
parser.add_option("", "--host", type="string", default="128.173.90.68",
help="database host in dotted decimal form [default=%default]")
parser.add_option("-r", "--radios", type="int", default="3",
help="number of field radios to simulate [default=%default]")
parser.add_option("-i", "--iterations", type="int", default="10",
help="number of times to run simulation [default=%default]")
# parser.add_option("-d", "--drop", action="store_true", default=False,
# help="simlulate dropped packets [default=%default]")
# parser.add_option("-j", "--jitter", type="store_true", default=False,
# help="simulate clock jitter, drift... [default=%default]")
(options, args) = parser.parse_args()
main = simulation()
main.init_sim(options.radios)
for i in range(options.iterations):
main.rx_beacon_packet()
main.receiver_chain(options.host)
# main.write_location_history()
# don't use, adbapi can't handle too many db connections...
# #self.data.set_rpt_packet(payload)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sys.stdout.write("sock.connect((HOST, PORT)) ...")
# sock.connect((HOST, PORT))
# sys.stdout.write(" Done\n")
# sys.stdout.write("sock.send...")
# sock.send('%s\r\n' % payload)
# sys.stdout.write(" Done\n")
# sock.close()
# # don't use if using sockets above
# def write_to_db(self):
# data = self.data.get_rpt_packet()
# print 'conn = MySQLdb.connect'
# db = MySQLdb.connect (host = "localhost",
# user = "sdrc_user",
# passwd = "sdrc_pass",
# db = "test01")
# print 'cursor = conn.cursor ()'
# cursor = db.cursor ()
# table = 'test01_table'
# fields = '(rpt_pkt_num, rpt_team_id, rpt_location, rpt_timestamp, beacon_id, beacon_pkt_num)'
# # reset database
# cursor.execute("""DELETE FROM %s""" %(table,))
# for i in range(len(data)):
# sql = """ """
# print "loop: ",i
# payload = data[i]
# (rpt_packet_num,) = struct.unpack('!H',payload[0:2])
# (rpt_team_id,) = struct.unpack('!H',payload[2:4])
# rpt_location = new_sim_utils.unpack_loc(payload[4:24])
# rpt_timestamp = new_sim_utils.unpack_time(payload[24:36])
# (beacon_packet_num,) = struct.unpack('!H',payload[36:38])
# (beacon_id,) = struct.unpack('!H',payload[38:40])
# print type(beacon_id)
# sql = """INSERT INTO %s %s VALUES (\'%d\', \'%d\', \'%s\', \'%s\', \'%d\', \'%d\')""" %(table,fields,rpt_packet_num,
# rpt_team_id,str(rpt_location),
# repr(rpt_timestamp),beacon_id,
# beacon_packet_num)
# print sql
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# print 'db.close()'
# db.close()
# def send_rpt_packet(self):
# """
# transmit repeater packets
# """
# pass
# def run(self):
# """
# run.
# """
# pass
# def work(self):
# """
# work function.
# """
# pass
# def __str__(self):
# """
# Print data in class: simulation
# """
# string = '\n########\nSimulation START\n'
# string += 'tx_location: ' + repr(self.data.get_tx_location()) + '\n'
# string += 'rx_location: ' + repr(self.data.get_rx_location()) + '\n'
# string += 'rx_time_delay: ' + repr(self.data.get_rx_time_delay()) + '\n'
# string += 'rx_team_id: ' + str(self.data.get_rx_team_id()) + '\n'
# string += 'rpt_packet: ' + str(self.data.get_rpt_packet())
# string += '########\nSimulation END\n'
# return string
# print main
# main.write_to_db()
# # not sure if we need this here
# dist = self.geo_utils.distance(__tx_loc,__rx_loc)
# self.__set_rx_distance(__dist)
# __power = new_sim_utils.power(__dist)
# self.set_rx_power(__power)
# def add_receiver(self):
# """
# add additional receiver to simulation
# """
# pass
# # do we really need this? don't think so...
# def copy_beacon_packet(self):
# """
# make n copies of beacon packet
# """
# num = self.get_rx_number()
# beacon_packet = self.get_beacon_packet()
# for i in range(__num):
# self.set_n_beacon_packet(__beacon_packet)
# Prepare SQL query to INSERT a record into the database.
# try:
# Execute the SQL command
# Commit your changes in the database
# except:
# # Rollback in case there is any error
# print 'db.rollback()'
# db.rollback()
# # disconnect from server
# cursor = db.cursor ()
# table = 'blob_table'
# fields = '(field_1)'
# sql = """INSERT INTO %s %s VALUES (\'%\r')""" %(table,fields,payload)
# print str(sql)
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# db.close()
|
normal
|
{
"blob_id": "530c2c185e57ffd3ac64628fc9f7f7985b0480fe",
"index": 5529,
"step-1": "#!/usr/bin/env python\n\nimport numpy as np\nimport time, random\nimport sys, os, struct, socket\nimport psycopg2\n\nimport test_coords\nimport alex_random\nimport new_sim_utils\nimport sdr_kml_writer\n\nfrom geo_utils import geo_utils\nfrom beacon import beacon\nfrom sim_data import data_utils\n\nENABLE_JITTER = False\nENABLE_DROPPED_PACKETS = False\nENABLE_LOCATION_HISTORY = True\nENABLE_BEACON_DELAY = False\n\n\nclass simulation:\n\n def __init__(self):\n \"\"\"__init__\"\"\"\n\n self.geo_utils = geo_utils()\n \n \n \n self.DEBUG = True\n self.rx_number = 4\n self.packet_number = 0\n\n self.iterator = 1\n self.packet_error_rate = 0.1\n self.all_locations = []\n\n\n\n def init_sim(self,n):\n \"\"\"\n initialize simulation for n receivers.\n \"\"\"\n self.beacon = beacon(ENABLE_BEACON_DELAY)\n self.data = data_utils(n)\n random.seed()\n\n if n < 3:\n print 'Number of receivers %i is less than three.' %n\n print 'Simulation controller will not run.'\n print 'Now exiting.'\n sys.exit()\n \n self.data.set_rx_number(n)\n\n\n\n tx_loc = test_coords.get_tx_coords()\n self.data.set_tx_location(tx_loc)\n # self.data.reset_rx_location()\n\n for i in range(n):\n rx_loc = alex_random.get_random_coord()\n if self.DEBUG:\n print \"\\n\\n\\n\\n\\n\\nstore location: \", rx_loc\n print '\\n\\n\\n\\n\\n\\n'\n self.data.set_rx_location(i,rx_loc)\n\n tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)\n self.data.set_rx_time_delay(tof)\n\n id = i+1\n self.data.set_rx_team_id(id)\n\n if self.DEBUG:\n print 'tx_loc: ', tx_loc\n print 'rx_loc: ', rx_loc\n print 'time: ', repr(tof)\n print 'id: ', id\n\n\n def rx_beacon_packet(self):\n \"\"\"\n receive a single beacon packet. this will then be copied n times.\n this tries to ensure clock synchronization across receivers.\n \"\"\" \n self.beacon.make_packet()\n rx_packet = self.beacon.tx_packet()\n rx_time = np.float128('%.20f'%(time.time()))\n if self.DEBUG:\n print 'rx_time: ', repr(rx_time)\n\n self.data.set_timestamp_base(rx_time)\n self.data.set_beacon_packet(rx_packet)\n\n\n def receiver_chain(self,h):\n \"\"\"\n simulate receiver chain for n repeaters\n \"\"\"\n\n self.host = h\n\n n = self.data.get_rx_number()\n beacon_packet = self.data.get_beacon_packet()\n time_base = self.data.get_timestamp_base()\n\n # lists containing data for all current teams\n team_id = self.data.get_rx_team_id()\n location = self.data.get_rx_location()\n if ENABLE_LOCATION_HISTORY:\n self.record_location_history(location)\n tof = self.data.get_rx_time_delay()\n\n if self.DEBUG:\n print \"\\n\\n\\n\\n\\n\\nretrieve location: \", location\n print ''\n print \"type(tof): \", type(tof)\n\n\n\n conn = psycopg2.connect(host = self.host,\n user = \"sdrc_user\",\n password = \"sdrc_pass\",\n database = \"sdrc_db\")\n\n cur = conn.cursor()\n\n\n for i in range(n):\n f = open('data_in.data', 'a')\n\n (rx_pktno,) = struct.unpack('!H', beacon_packet[0:2])\n (beacon_ID,) = struct.unpack('!H', beacon_packet[2:4])\n\n # packet number\n payload1 = struct.pack('!H', self.packet_number & 0xffff)\n f.write(str(self.packet_number) + ';')\n\n # team id\n ident = team_id[i]\n payload2 = struct.pack('!H', ident & 0xffff)\n f.write(str(ident) + ';')\n\n # location\n if (self.iterator == 1):\n loc = location[i]\n else:\n # old_loc = location[i]\n # loc = alex_random.random_move(old_loc)\n loc = alex_random.get_random_coord()\n self.data.set_rx_location(i,loc)\n\n f.write(str(loc)+';')\n\n self.iterator += 1\n payload3 = new_sim_utils.pack_loc(loc)\n \n\n # toa\n t = tof[i]\n toa = time_base + 
t\n # if (ENABLE_JITTER):\n # jitter = self.random_timing_jitter()\n # toa = toa+jitter\n # else:\n # pass\n if self.DEBUG:\n print \"t = tof[i]: \", repr(t)\n print \"type(t): \", type (t)\n print \"toa = time_base + t: \", repr(toa)\n print \"type(toa): \", type(toa)\n payload4 = new_sim_utils.pack_time(toa)\n\n f.write(repr(toa)+';')\n\n\n # beacon payload\n payload5 = struct.pack('!H', rx_pktno & 0xffff)\n f.write(str(rx_pktno) + ';')\n payload6 = struct.pack('!H', beacon_ID & 0xffff)\n f.write(str(beacon_ID) + '\\n')\n f.close()\n # check if packet dropped\n drop = self.drop_packet()\n # this if evaluates true even if drop == False\n # if (ENABLE_DROPPED_PACKETS and drop): # if drop == 'True'\n # print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS\n # print 'drop ', drop\n # print (ENABLE_DROPPED_PACKETS and drop)\n # print 'packet dropped'\n # payload = ''\n if ENABLE_DROPPED_PACKETS:\n print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS\n print 'drop ', drop\n if drop: # if drop == 'True'\n print 'drop ', drop\n print 'packet dropped'\n payload = ''\n else: # if drop == 'False'\n payload = (payload1 + payload2 +\n payload3 + payload4 +\n payload5 + payload6)\n else: # if drop == 'False'\n payload = (payload1 + payload2 +\n payload3 + payload4 +\n payload5 + payload6)\n\n\n print \"len(payload): \", len(payload)\n cur.execute(\"INSERT INTO blob_table (field_1) VALUES (%s)\", (psycopg2.Binary(payload),))\n\n\n conn.commit()\n cur.close() \n conn.close()\n\n self.packet_number += 1\n \n\n\n def record_location_history(self,loc):\n self.all_locations.append(loc)\n # if self.DEBUG:\n # print 'all locations:\\n', self.all_locations\n\n # def write_location_history(self):\n # # f = open('location_history','w+')\n # for i in self.all_locations:\n # print repr(i[0][0][0]), repr(i[0][0][1]))\n # # f.write(repr(i)+'\\n')\n # print '\\n\\n\\n\\n\\n\\n\\n'\n # print len(i)\n # # f.close()\n\n # kml_write = sdr_kml_writer.kml_writer()\n\n # for i in range(0,len(x_results)):\n # coord = str(x_results[i])+','+str(y_results[i])\n # kml_write.add_placemark('','',coord)\n # kml_write.write_to_file('geoloc_kml_file.kml') \n\n\n def random_timing_jitter(self):\n r = random.uniform(0,1)\n jitter = r*1e-9\n if self.DEBUG:\n print 'Random timing jitter %f seconds' %(jitter)\n return jitter\n\n\n\n def drop_packet(self):\n r = random.uniform(0,1)\n print 'random value: ', r\n print 'error rate: ', self.packet_error_rate\n if (r > self.packet_error_rate):\n \n drop = False\n else:\n drop = True\n if self.DEBUG:\n print 'Probability of dropped packet: ', self.packet_error_rate\n print 'Packet dropped? ', drop\n return drop\n \n \n\n\n\n\nif __name__=='__main__':\n from optparse import OptionParser\n usage = \"usage: %prog [options] arg\"\n\n parser = OptionParser(usage=usage)\n parser.add_option(\"\", \"--host\", type=\"string\", default=\"128.173.90.68\",\n help=\"database host in dotted decimal form [default=%default]\")\n parser.add_option(\"-r\", \"--radios\", type=\"int\", default=\"3\",\n help=\"number of field radios to simulate [default=%default]\")\n parser.add_option(\"-i\", \"--iterations\", type=\"int\", default=\"10\",\n help=\"number of times to run simulation [default=%default]\")\n # parser.add_option(\"-d\", \"--drop\", action=\"store_true\", default=False,\n # help=\"simlulate dropped packets [default=%default]\")\n # parser.add_option(\"-j\", \"--jitter\", type=\"store_true\", default=False,\n # help=\"simulate clock jitter, drift... 
[default=%default]\")\n\n (options, args) = parser.parse_args()\n\n main = simulation()\n main.init_sim(options.radios)\n for i in range(options.iterations): \n main.rx_beacon_packet()\n main.receiver_chain(options.host)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# main.write_location_history()\n\n\n\n\n # don't use, adbapi can't handle too many db connections...\n# #self.data.set_rpt_packet(payload)\n# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# sys.stdout.write(\"sock.connect((HOST, PORT)) ...\")\n# sock.connect((HOST, PORT))\n# sys.stdout.write(\" Done\\n\")\n# sys.stdout.write(\"sock.send...\")\n# sock.send('%s\\r\\n' % payload)\n# sys.stdout.write(\" Done\\n\")\n# sock.close()\n\n\n\n \n\n # # don't use if using sockets above\n # def write_to_db(self):\n \n # data = self.data.get_rpt_packet()\n\n # print 'conn = MySQLdb.connect'\n # db = MySQLdb.connect (host = \"localhost\",\n # user = \"sdrc_user\",\n # passwd = \"sdrc_pass\",\n # db = \"test01\")\n # print 'cursor = conn.cursor ()'\n # cursor = db.cursor ()\n\n # table = 'test01_table'\n # fields = '(rpt_pkt_num, rpt_team_id, rpt_location, rpt_timestamp, beacon_id, beacon_pkt_num)'\n\n # # reset database\n # cursor.execute(\"\"\"DELETE FROM %s\"\"\" %(table,))\n \n\n # for i in range(len(data)):\n # sql = \"\"\" \"\"\"\n # print \"loop: \",i\n # payload = data[i]\n # (rpt_packet_num,) = struct.unpack('!H',payload[0:2])\n # (rpt_team_id,) = struct.unpack('!H',payload[2:4])\n # rpt_location = new_sim_utils.unpack_loc(payload[4:24])\n # rpt_timestamp = new_sim_utils.unpack_time(payload[24:36])\n # (beacon_packet_num,) = struct.unpack('!H',payload[36:38])\n # (beacon_id,) = struct.unpack('!H',payload[38:40])\n\n\n # print type(beacon_id)\n\n\n\n # sql = \"\"\"INSERT INTO %s %s VALUES (\\'%d\\', \\'%d\\', \\'%s\\', \\'%s\\', \\'%d\\', \\'%d\\')\"\"\" %(table,fields,rpt_packet_num,\n # rpt_team_id,str(rpt_location),\n # repr(rpt_timestamp),beacon_id,\n # beacon_packet_num)\n\n # print sql\n\n # print 'cursor.execute(sql)'\n # cursor.execute(sql)\n\n # print 'db.commit()'\n # db.commit()\n\n\n # print 'db.close()'\n # db.close()\n\n\n\n\n\n\n # def send_rpt_packet(self):\n # \"\"\"\n # transmit repeater packets\n # \"\"\"\n # pass\n\n\n\n\n\n\n\n\n \n \n\n\n # def run(self):\n # \"\"\"\n # run.\n # \"\"\"\n # pass\n\n # def work(self):\n # \"\"\"\n # work function.\n # \"\"\"\n # pass\n\n # def __str__(self):\n # \"\"\"\n # Print data in class: simulation\n # \"\"\"\n # string = '\\n########\\nSimulation START\\n'\n # string += 'tx_location: ' + repr(self.data.get_tx_location()) + '\\n'\n # string += 'rx_location: ' + repr(self.data.get_rx_location()) + '\\n'\n # string += 'rx_time_delay: ' + repr(self.data.get_rx_time_delay()) + '\\n'\n # string += 'rx_team_id: ' + str(self.data.get_rx_team_id()) + '\\n'\n # string += 'rpt_packet: ' + str(self.data.get_rpt_packet())\n # string += '########\\nSimulation END\\n'\n # return string\n \n\n\n\n\n\n\n\n# print main\n# main.write_to_db()\n \n\n# # not sure if we need this here\n# dist = self.geo_utils.distance(__tx_loc,__rx_loc)\n# self.__set_rx_distance(__dist)\n\n# __power = new_sim_utils.power(__dist)\n# self.set_rx_power(__power)\n\n\n# def add_receiver(self):\n# \"\"\"\n# add additional receiver to simulation\n# \"\"\"\n# pass\n\n# # do we really need this? 
don't think so...\n# def copy_beacon_packet(self):\n# \"\"\"\n# make n copies of beacon packet\n# \"\"\"\n# num = self.get_rx_number()\n# beacon_packet = self.get_beacon_packet()\n\n# for i in range(__num):\n# self.set_n_beacon_packet(__beacon_packet)\n\n\n# Prepare SQL query to INSERT a record into the database.\n# try:\n# Execute the SQL command\n# Commit your changes in the database\n# except:\n# # Rollback in case there is any error\n# print 'db.rollback()'\n# db.rollback()\n\n# # disconnect from server\n\n\n # cursor = db.cursor ()\n\n # table = 'blob_table'\n # fields = '(field_1)'\n\n\n\n # sql = \"\"\"INSERT INTO %s %s VALUES (\\'%\\r')\"\"\" %(table,fields,payload)\n\n # print str(sql)\n\n # print 'cursor.execute(sql)'\n # cursor.execute(sql)\n\n # print 'db.commit()'\n # db.commit()\n\n # db.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
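The simulation record above leans on geo_utils.time_of_flight, whose implementation is not included here; under a free-space assumption it is simply propagation distance divided by the speed of light. A hedged sketch using the haversine distance (the (lat, lon)-in-degrees coordinate format is an assumption, as the record never shows it):

import math

SPEED_OF_LIGHT = 299792458.0  # meters per second


def time_of_flight(rx_loc, tx_loc, earth_radius=6371000.0):
    # Great-circle (haversine) distance divided by c; assumes (lat, lon)
    # pairs in degrees.
    lat1, lon1 = map(math.radians, rx_loc)
    lat2, lon2 = map(math.radians, tx_loc)
    a = (math.sin((lat2 - lat1) / 2.0) ** 2 +
         math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2.0) ** 2)
    return 2.0 * earth_radius * math.asin(math.sqrt(a)) / SPEED_OF_LIGHT


print(time_of_flight((37.2296, -80.4139), (37.2296, -80.4239)))  # hypothetical nearby points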
<|reserved_special_token_0|>
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help=
'dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help=
'maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help=
        'extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help=
'if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default='./', help=
'path to save results')
parser.add_argument('--maxsize', type=int, default=None, help=
'max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
        save_dir - directory for saving the transformed data
        maxsize (int) - maximum number of sequences to load
        maxlen (int) - maximum length of sequence; sequences longer than maxlen will be truncated
        ext (str) - extension of files in data_dir directory
        datetime (bool) - whether time values in the files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == 'pkl':
with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range(len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('..')
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help=
'dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help=
'maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help=
        'extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help=
'if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default='./', help=
'path to save results')
parser.add_argument('--maxsize', type=int, default=None, help=
'max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
        save_dir - directory for saving the transformed data
        maxsize (int) - maximum number of sequences to load
        maxlen (int) - maximum length of sequence; sequences longer than maxlen will be truncated
        ext (str) - extension of files in data_dir directory
        datetime (bool) - whether time values in the files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == 'pkl':
with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range(len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
<|reserved_special_token_0|>
print(args)
tranform_data(args)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('..')
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help=
'dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help=
'maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help=
        'extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help=
'if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default='./', help=
'path to save results')
parser.add_argument('--maxsize', type=int, default=None, help=
'max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
        save_dir - directory for saving the transformed data
        maxsize (int) - maximum number of sequences to load
        maxlen (int) - maximum length of sequence; sequences longer than maxlen will be truncated
        ext (str) - extension of files in data_dir directory
        datetime (bool) - whether time values in the files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == 'pkl':
with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range(len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
args = parse_arguments()
print(args)
tranform_data(args)
<|reserved_special_token_1|>
import torch
import tarfile
import pickle
import pandas
import json
import argparse
from pathlib import Path
import numpy as np
import shutil
from shutil import copyfile
import os
import re
import pandas as pd
import sys
from numpy import asarray
from numpy import savetxt
sys.path.append('..')
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help=
'dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help=
'maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help=
        'extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help=
'if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default='./', help=
'path to save results')
parser.add_argument('--maxsize', type=int, default=None, help=
'max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
        save_dir - directory for saving the transformed data
        maxsize (int) - maximum number of sequences to load
        maxlen (int) - maximum length of sequence; sequences longer than maxlen will be truncated
        ext (str) - extension of files in data_dir directory
        datetime (bool) - whether time values in the files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == 'pkl':
with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range(len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
args = parse_arguments()
print(args)
tranform_data(args)
<|reserved_special_token_1|>
import torch
import tarfile
import pickle
import pandas
import json
import argparse
from pathlib import Path
import numpy as np
import shutil
from shutil import copyfile
import os
import re
import pandas as pd
import sys
from numpy import asarray
from numpy import savetxt
sys.path.append("..")
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help='dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help='maximum length of sequence')
    parser.add_argument('--ext', type=str, default='tar.gz', help='extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help='if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default = './', help='path to save results')
parser.add_argument('--maxsize', type=int, default=None, help='max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
        save_dir - directory for saving the transformed data
        maxsize (int) - maximum number of sequences to load
        maxlen (int) - maximum length of sequence; sequences longer than maxlen will be truncated
        ext (str) - extension of files in data_dir directory
        datetime (bool) - whether time values in the files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == "pkl":
with open(Path(args.data_dir, "fx_labels"), "rb") as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range (len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
args = parse_arguments()
print(args)
tranform_data(args)
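
The ground-truth handling in tranform_data above converts arbitrary cluster labels into integer indices with np.unique and np.nonzero; a standalone sketch of just that step (the label values are hypothetical):

import numpy as np

gt_ids = np.array(['EURUSD', 'GBPUSD', 'EURUSD', 'USDJPY'])  # hypothetical labels
labels = np.unique(gt_ids)  # sorted unique labels: ['EURUSD' 'GBPUSD' 'USDJPY']
gt_data = [int(np.nonzero(gt_ids[i] == labels)[0]) for i in range(len(gt_ids))]
print(gt_data)  # [0, 1, 0, 2] -- each label replaced by its index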
|
flexible
|
{
"blob_id": "da55d9a6534525e58b6c1d2db997e90a1c9b0f36",
"index": 1427,
"step-1": "<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\n<mask token>\nprint(args)\ntranform_data(args)\n",
"step-3": "<mask token>\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)\n",
"step-4": "import torch\nimport tarfile\nimport pickle\nimport pandas\nimport json\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nimport shutil\nfrom shutil import copyfile\nimport os\nimport re\nimport pandas as pd\nimport sys\nfrom numpy import asarray\nfrom numpy import savetxt\nsys.path.append('..')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help=\n 'dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help=\n 'maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help=\n 'extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help=\n 'if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default='./', help=\n 'path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help=\n 'max number of sequences')\n args = parser.parse_args()\n return args\n\n\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen\n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == 'pkl':\n with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range(len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)\n",
"step-5": "import torch\nimport tarfile\nimport pickle\nimport pandas\nimport json\nimport argparse\nfrom pathlib import Path\nimport numpy as np\nimport shutil\nfrom shutil import copyfile\nimport os\nimport re\nimport pandas as pd\nimport sys\nfrom numpy import asarray\nfrom numpy import savetxt\nsys.path.append(\"..\")\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, required=True, help='dir holding sequences as separate files')\n parser.add_argument('--maxlen', type=int, default=500, help='maximum length of sequence')\n parser.add_argument('--ext', type=str, default='tar.gz', help='extention of files with sequences')\n parser.add_argument('--datetime', type=bool, default=False, help='if time values in event sequences are represented in datetime format')\n parser.add_argument('--save_dir', type=str, default = './', help='path to save results')\n parser.add_argument('--maxsize', type=int, default=None, help='max number of sequences')\n args = parser.parse_args()\n return args\ndef tranform_data(args):\n \"\"\"\n Loads the sequences saved in the given directory.\n Args:\n data_dir (str, Path) - directory containing sequences\n save_dir - directory for saving transform data\n maxsize (int) - maximum number of sequences to load\n maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated\n ext (str) - extension of files in data_dir directory\n datetime (bool) - variable meaning if time values in files are represented in datetime format\n \n \"\"\"\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen \n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == \"pkl\":\n with open(Path(args.data_dir, \"fx_labels\"), \"rb\") as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range (len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))\n \n\n\n\nargs = parse_arguments()\nprint(args)\ntranform_data(args)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
a, b = map(int, input().split())
def mult(a, b):
if a > 9 or b > 9 or a < 1 or b < 1:
print(-1)
else:
print(a * b)
mult(a, b)
|
normal
|
{
"blob_id": "991fa5f9c83a1821e62f7baacbc56a4d31982312",
"index": 3681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\nmult(a, b)\n",
"step-4": "a, b = map(int, input().split())\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\nmult(a, b)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Make sure overwriting read-only files works as expected (via win-tool).
"""
import TestGyp
import filecmp
import os
import stat
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
os.makedirs('subdir')
read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']
for f in read_only_files:
test.write(f, 'source_contents')
test.chmod(f, stat.S_IREAD)
if os.access(f, os.W_OK):
test.fail_test()
os.makedirs(test.built_file_path('dest/subdir'))
for f in read_only_files:
f = os.path.join('dest', f)
test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')
test.chmod(test.built_file_path(f), stat.S_IREAD)
if os.access(test.built_file_path(f), os.W_OK):
test.fail_test()
test.run_gyp('copies_readonly_files.gyp')
test.build('copies_readonly_files.gyp')
for f in read_only_files:
f = os.path.join('dest', f)
test.must_contain(test.built_file_path(f), 'source_contents')
for f in read_only_files:
if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):
test.fail_test()
test.pass_test()
|
normal
|
{
"blob_id": "efe5921afb160b7b5a953cdd0c2f90f64b5f34c9",
"index": 5975,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif sys.platform == 'win32':\n test = TestGyp.TestGyp(formats=['ninja'])\n os.makedirs('subdir')\n read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']\n for f in read_only_files:\n test.write(f, 'source_contents')\n test.chmod(f, stat.S_IREAD)\n if os.access(f, os.W_OK):\n test.fail_test()\n os.makedirs(test.built_file_path('dest/subdir'))\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')\n test.chmod(test.built_file_path(f), stat.S_IREAD)\n if os.access(test.built_file_path(f), os.W_OK):\n test.fail_test()\n test.run_gyp('copies_readonly_files.gyp')\n test.build('copies_readonly_files.gyp')\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.must_contain(test.built_file_path(f), 'source_contents')\n for f in read_only_files:\n if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):\n test.fail_test()\n test.pass_test()\n",
"step-3": "<mask token>\nimport TestGyp\nimport filecmp\nimport os\nimport stat\nimport sys\nif sys.platform == 'win32':\n test = TestGyp.TestGyp(formats=['ninja'])\n os.makedirs('subdir')\n read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']\n for f in read_only_files:\n test.write(f, 'source_contents')\n test.chmod(f, stat.S_IREAD)\n if os.access(f, os.W_OK):\n test.fail_test()\n os.makedirs(test.built_file_path('dest/subdir'))\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')\n test.chmod(test.built_file_path(f), stat.S_IREAD)\n if os.access(test.built_file_path(f), os.W_OK):\n test.fail_test()\n test.run_gyp('copies_readonly_files.gyp')\n test.build('copies_readonly_files.gyp')\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.must_contain(test.built_file_path(f), 'source_contents')\n for f in read_only_files:\n if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):\n test.fail_test()\n test.pass_test()\n",
"step-4": "\n\n\n\n\n\n\"\"\"\nMake sure overwriting read-only files works as expected (via win-tool).\n\"\"\"\n\nimport TestGyp\n\nimport filecmp\nimport os\nimport stat\nimport sys\n\nif sys.platform == 'win32':\n test = TestGyp.TestGyp(formats=['ninja'])\n\n \n os.makedirs('subdir')\n read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']\n for f in read_only_files:\n test.write(f, 'source_contents')\n test.chmod(f, stat.S_IREAD)\n if os.access(f, os.W_OK):\n test.fail_test()\n\n \n \n \n os.makedirs(test.built_file_path('dest/subdir'))\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')\n test.chmod(test.built_file_path(f), stat.S_IREAD)\n \n if os.access(test.built_file_path(f), os.W_OK):\n test.fail_test()\n\n test.run_gyp('copies_readonly_files.gyp')\n test.build('copies_readonly_files.gyp')\n\n \n for f in read_only_files:\n f = os.path.join('dest', f)\n test.must_contain(test.built_file_path(f), 'source_contents')\n\n \n for f in read_only_files:\n if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):\n test.fail_test()\n\n test.pass_test()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ReleaseFile:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return repr(self.name)
class SourceFile:
"""! Class represeting a source file
`name`: str
File name,
`url`: str
FTP URL,
`group`
<unknown>
@details More details
"""
def __init__(self, name, url, group):
self.group = group
self.url = url
self.name = name
class FileGroup:
"""! Represents a file group
`name`: str
Name of this group
`files`: List[ReleaseFile]
List of files within this group
`mainFile`: str
If this FileGroup has a subgroup, `mainFile` is the head of that group
`subFiles`: List[ReleaseFile]
Files within a subgroup
"""
def __init__(self, name, files: List[ReleaseFile]):
self.files = files
self.name = name
if len(files) == 1:
self.mainFile = files[0]
self.subFiles = {}
else:
self.mainFile = None
subFiles = []
for file in files:
if file.subgroup == 'SSE2':
self.mainFile = file
else:
subFiles.append(file)
self.subFiles = dict((x[0], next(x[1])) for x in groupby(
subFiles, lambda f: f.subgroup))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ReleaseFile:
"""! Class representing a Released file on Nebula
`name`: str
Mod (or build) name,
`url`: str
Primary host URL,
`group`: str
Mod group string,
`subgroup`: str
Mod subgroup string,
`mirrors`: List[str]
List of URL's of FTP mirrors
"""
def __init__(self, name, url, group, subgroup=None, mirrors=None):
if mirrors is None:
mirrors = []
self.mirrors = mirrors
self.subgroup = subgroup
self.group = group
self.url = url
self.name = name
self.base_url = '/'.join(url.split('/')[0:-1]) + '/'
self.filename = url.split('/')[-1]
self.content_hashes = None
self.hash = None
self.size = 0
def __repr__(self):
return repr(self.name)
class SourceFile:
"""! Class represeting a source file
`name`: str
File name,
`url`: str
FTP URL,
`group`
<unknown>
@details More details
"""
def __init__(self, name, url, group):
self.group = group
self.url = url
self.name = name
class FileGroup:
"""! Represents a file group
`name`: str
Name of this group
`files`: List[ReleaseFile]
List of files within this group
`mainFile`: str
If this FileGroup has a subgroup, `mainFile` is the head of that group
`subFiles`: List[ReleaseFile]
Files within a subgroup
"""
def __init__(self, name, files: List[ReleaseFile]):
self.files = files
self.name = name
if len(files) == 1:
self.mainFile = files[0]
self.subFiles = {}
else:
self.mainFile = None
subFiles = []
for file in files:
if file.subgroup == 'SSE2':
self.mainFile = file
else:
subFiles.append(file)
self.subFiles = dict((x[0], next(x[1])) for x in groupby(
subFiles, lambda f: f.subgroup))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ReleaseFile:
"""! Class representing a Released file on Nebula
`name`: str
Mod (or build) name,
`url`: str
Primary host URL,
`group`: str
Mod group string,
`subgroup`: str
Mod subgroup string,
`mirrors`: List[str]
List of URL's of FTP mirrors
"""
def __init__(self, name, url, group, subgroup=None, mirrors=None):
if mirrors is None:
mirrors = []
self.mirrors = mirrors
self.subgroup = subgroup
self.group = group
self.url = url
self.name = name
self.base_url = '/'.join(url.split('/')[0:-1]) + '/'
self.filename = url.split('/')[-1]
self.content_hashes = None
self.hash = None
self.size = 0
def __repr__(self):
return repr(self.name)
class SourceFile:
"""! Class represeting a source file
`name`: str
File name,
`url`: str
FTP URL,
`group`
<unknown>
@details More details
"""
def __init__(self, name, url, group):
self.group = group
self.url = url
self.name = name
class FileGroup:
"""! Represents a file group
`name`: str
Name of this group
`files`: List[ReleaseFile]
List of files within this group
`mainFile`: str
If this FileGroup has a subgroup, `mainFile` is the head of that group
`subFiles`: List[ReleaseFile]
Files within a subgroup
"""
def __init__(self, name, files: List[ReleaseFile]):
self.files = files
self.name = name
if len(files) == 1:
self.mainFile = files[0]
self.subFiles = {}
else:
self.mainFile = None
subFiles = []
for file in files:
if file.subgroup == 'SSE2':
self.mainFile = file
else:
subFiles.append(file)
self.subFiles = dict((x[0], next(x[1])) for x in groupby(
subFiles, lambda f: f.subgroup))
def get_release_files(tag_name, config) ->Tuple[List[ReleaseFile], Dict[str,
SourceFile]]:
"""! Brief Gets the binary and source files from the Github Release server
@param[in] `tag_name` Git tag of the current release
@param[in] `config` confi metadata set in main.py
@returns `List[ReleaseFile]` List of release files
@returns `Dict[str, SourceFile]` Dictionary of source files
@details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not
actually downloaded here, just their metadata is gathered and organized in their respective container for later
use.
"""
@retry_multi(5)
def execute_request(path):
"""!
@brief Performs a GET request with the given path. To be used with Github's REST API.
@returns If successful, returns a .JSON object
"""
headers = {'Accept': 'application/vnd.github.v3+json'}
url = 'https://api.github.com' + path
response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)
response.raise_for_status()
return response.json()
build_group_regex = re.compile('fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*')
source_file_regex = re.compile('fs2_open_.*-source-([^.]*)?.*')
response = execute_request('/repos/{}/releases/tags/{}'.format(config[
'github']['repo'], tag_name))
binary_files = []
source_files = {}
for asset in response['assets']:
url = asset['browser_download_url']
name = asset['name']
group_match = build_group_regex.match(name)
if group_match is not None:
platform = group_match.group(1)
if platform == 'x64':
platform = 'Win64'
binary_files.append(ReleaseFile(name, url, platform,
group_match.group(3)))
else:
group_match = source_file_regex.match(name)
if group_match is None:
continue
group = group_match.group(1)
source_files[group] = SourceFile(name, url, group)
binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)
return binary_files, source_files
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ReleaseFile:
"""! Class representing a Released file on Nebula
`name`: str
Mod (or build) name,
`url`: str
Primary host URL,
`group`: str
Mod group string,
`subgroup`: str
Mod subgroup string,
`mirrors`: List[str]
List of URL's of FTP mirrors
"""
def __init__(self, name, url, group, subgroup=None, mirrors=None):
if mirrors is None:
mirrors = []
self.mirrors = mirrors
self.subgroup = subgroup
self.group = group
self.url = url
self.name = name
self.base_url = '/'.join(url.split('/')[0:-1]) + '/'
self.filename = url.split('/')[-1]
self.content_hashes = None
self.hash = None
self.size = 0
def __repr__(self):
return repr(self.name)
class SourceFile:
"""! Class represeting a source file
`name`: str
File name,
`url`: str
FTP URL,
`group`
<unknown>
@details More details
"""
def __init__(self, name, url, group):
self.group = group
self.url = url
self.name = name
class FileGroup:
"""! Represents a file group
`name`: str
Name of this group
`files`: List[ReleaseFile]
List of files within this group
`mainFile`: str
If this FileGroup has a subgroup, `mainFile` is the head of that group
`subFiles`: List[ReleaseFile]
Files within a subgroup
"""
def __init__(self, name, files: List[ReleaseFile]):
self.files = files
self.name = name
if len(files) == 1:
self.mainFile = files[0]
self.subFiles = {}
else:
self.mainFile = None
subFiles = []
for file in files:
if file.subgroup == 'SSE2':
self.mainFile = file
else:
subFiles.append(file)
self.subFiles = dict((x[0], next(x[1])) for x in groupby(
subFiles, lambda f: f.subgroup))
def get_release_files(tag_name, config) ->Tuple[List[ReleaseFile], Dict[str,
SourceFile]]:
"""! Brief Gets the binary and source files from the Github Release server
@param[in] `tag_name` Git tag of the current release
@param[in] `config` confi metadata set in main.py
@returns `List[ReleaseFile]` List of release files
@returns `Dict[str, SourceFile]` Dictionary of source files
@details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not
actually downloaded here, just their metadata is gathered and organized in their respective container for later
use.
"""
@retry_multi(5)
def execute_request(path):
"""!
@brief Performs a GET request with the given path. To be used with Github's REST API.
@returns If successful, returns a .JSON object
"""
headers = {'Accept': 'application/vnd.github.v3+json'}
url = 'https://api.github.com' + path
response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)
response.raise_for_status()
return response.json()
build_group_regex = re.compile('fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*')
source_file_regex = re.compile('fs2_open_.*-source-([^.]*)?.*')
response = execute_request('/repos/{}/releases/tags/{}'.format(config[
'github']['repo'], tag_name))
binary_files = []
source_files = {}
for asset in response['assets']:
url = asset['browser_download_url']
name = asset['name']
group_match = build_group_regex.match(name)
if group_match is not None:
platform = group_match.group(1)
if platform == 'x64':
platform = 'Win64'
binary_files.append(ReleaseFile(name, url, platform,
group_match.group(3)))
else:
group_match = source_file_regex.match(name)
if group_match is None:
continue
group = group_match.group(1)
source_files[group] = SourceFile(name, url, group)
binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)
return binary_files, source_files
def get_ftp_files(build_type, tag_name, config) ->List[ReleaseFile]:
"""!
@brief Gets file metadata for nightlies hosted on FTP, as determined by config["ftp"] attributes
@param [in] `build_type` Unknown str
@param [in] `tag_name` Github tag name of the release
@param [in] `config` config metadata set in main.py
"""
tag_regex = re.compile('nightly_(.*)')
build_group_regex = re.compile('nightly_.*-builds-([^.]+).*')
files = []
try:
with FTP(config['ftp']['host'], config['ftp']['user'], config['ftp'
]['pass']) as ftp:
version_str = tag_regex.match(tag_name).group(1)
path_template = config['ftp']['path']
path = path_template.format(type=build_type, version=version_str)
file_entries = list(ftp.mlsd(path, ['type']))
for entry in file_entries:
if entry[1]['type'] == 'file':
files.append(entry[0])
except error_perm:
print('Received permanent FTP error!')
return []
out_data = []
for file in files:
file_match = build_group_regex.match(file)
if file_match is None:
print("Ignoring non nightly file '{}'".format(file))
continue
group_match = file_match.group(1)
primary_url = None
mirrors = []
if 'x64' in group_match:
group_match = group_match.replace('x64', 'Win64')
for mirror in config['ftp']['mirrors']:
download_url = mirror.format(type=build_type, version=
version_str, file=file)
if primary_url is None:
primary_url = download_url
else:
mirrors.append(download_url)
out_data.append(ReleaseFile(file, primary_url, group_match, None,
mirrors))
return out_data
<|reserved_special_token_1|>
import re # regex module
from ftplib import FTP, error_perm
from itertools import groupby
from typing import List, Tuple, Dict
import requests # HTTP requests module
from util import retry_multi, GLOBAL_TIMEOUT # from util.py
class ReleaseFile:
"""! Class representing a Released file on Nebula
`name`: str
Mod (or build) name,
`url`: str
Primary host URL,
`group`: str
Mod group string,
`subgroup`: str
Mod subgroup string,
`mirrors`: List[str]
List of URL's of FTP mirrors
"""
def __init__(self, name, url, group, subgroup=None, mirrors=None):
if mirrors is None:
mirrors = []
self.mirrors = mirrors
self.subgroup = subgroup
self.group = group
self.url = url
self.name = name
self.base_url = "/".join(url.split('/')[0:-1]) + "/"
self.filename = url.split('/')[-1]
# A list of tuples of (filename, hash)
self.content_hashes = None
self.hash = None
self.size = 0
def __repr__(self):
return repr((self.name))
class SourceFile:
"""! Class represeting a source file
`name`: str
File name,
`url`: str
FTP URL,
`group`
<unknown>
@details More details
"""
def __init__(self, name, url, group):
self.group = group
self.url = url
self.name = name
class FileGroup:
"""! Represents a file group
`name`: str
Name of this group
`files`: List[ReleaseFile]
List of files within this group
`mainFile`: str
If this FileGroup has a subgroup, `mainFile` is the head of that group
`subFiles`: List[ReleaseFile]
Files within a subgroup
"""
def __init__(self, name, files: List[ReleaseFile]):
self.files = files
self.name = name
if len(files) == 1:
self.mainFile = files[0]
self.subFiles = {}
else:
self.mainFile = None
subFiles = []
for file in files:
# We only have subcategories for Windows where SSE2 is the main group
if file.subgroup == "SSE2":
self.mainFile = file
else:
subFiles.append(file)
self.subFiles = dict(((x[0], next(x[1])) for x in groupby(subFiles, lambda f: f.subgroup)))
def get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]:
"""! Brief Gets the binary and source files from the Github Release server
@param[in] `tag_name` Git tag of the current release
@param[in] `config` confi metadata set in main.py
@returns `List[ReleaseFile]` List of release files
@returns `Dict[str, SourceFile]` Dictionary of source files
@details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not
actually downloaded here, just their metadata is gathered and organized in their respective container for later
use.
"""
@retry_multi(5) # retry at most 5 times
def execute_request(path):
"""!
@brief Performs a GET request with the given path. To be used with Github's REST API.
@returns If successful, returns a .JSON object
"""
headers = {
"Accept": "application/vnd.github.v3+json"
}
url = "https://api.github.com" + path
# GET https://api.github.com/<path> Accept: "application/vnd.github.v3+json"
response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)
response.raise_for_status() # Raise a RequestException if we failed, and trigger retry
return response.json()
build_group_regex = re.compile("fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*") # regex for matching binary .zip's and .7z's
source_file_regex = re.compile("fs2_open_.*-source-([^.]*)?.*") # regex for matching source .zip's and .7z's
# Get the github release metadata of the given tag name
response = execute_request(
"/repos/{}/releases/tags/{}".format(config["github"]["repo"], tag_name))
# Extract the binary and source files from the response["asset"] metadata
binary_files = []
source_files = {}
for asset in response["assets"]:
url = asset["browser_download_url"]
name = asset["name"]
group_match = build_group_regex.match(name)
if group_match is not None:
platform = group_match.group(1)
# x64 is the Visual Studio name but for consistency we need Win64
if platform == "x64":
platform = "Win64"
binary_files.append(ReleaseFile(name, url, platform, group_match.group(3)))
else:
group_match = source_file_regex.match(name)
if group_match is None:
continue
group = group_match.group(1)
source_files[group] = SourceFile(name, url, group)
binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)
return binary_files, source_files
def get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] :
"""!
@brief Gets file metadata for nightlies hosted on FTP, as determined by config["ftp"] attributes
@param [in] `build_type` Unknown str
@param [in] `tag_name` Github tag name of the release
@param [in] `config` config metadata set in main.py
"""
tag_regex = re.compile("nightly_(.*)")
build_group_regex = re.compile("nightly_.*-builds-([^.]+).*")
files = []
try:
with FTP(config["ftp"]["host"], config["ftp"]["user"], config["ftp"]["pass"]) as ftp:
# extract version
version_str = tag_regex.match(tag_name).group(1)
# extract filepath w/ version
# then list all ftp hits with that path
path_template = config["ftp"]["path"]
path = path_template.format(type=build_type, version=version_str)
file_entries = list(ftp.mlsd(path, ["type"]))
# get all ftp hits of type file
for entry in file_entries:
if entry[1]["type"] == "file":
files.append(entry[0])
except error_perm:
print("Received permanent FTP error!")
return []
out_data = []
for file in files:
# from the file list, extract only nightly files
file_match = build_group_regex.match(file)
if file_match is None:
print("Ignoring non nightly file '{}'".format(file))
continue
group_match = file_match.group(1)
primary_url = None
mirrors = []
# x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post
if "x64" in group_match:
group_match = group_match.replace("x64", "Win64")
# construct the download URL list for all mirrors. The first listed ftp location is taken as the Primary
for mirror in config["ftp"]["mirrors"]:
download_url = mirror.format(type=build_type, version=version_str, file=file)
if primary_url is None:
primary_url = download_url
else:
mirrors.append(download_url)
# Form the List[ReleaseFile] list with the download URL links
out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors))
return out_data
|
flexible
|
{
"blob_id": "612b1851ba5a07a277982ed5be334392182c66ef",
"index": 4064,
"step-1": "<mask token>\n\n\nclass ReleaseFile:\n <mask token>\n <mask token>\n\n def __repr__(self):\n return repr(self.name)\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n if file.subgroup == 'SSE2':\n self.mainFile = file\n else:\n subFiles.append(file)\n self.subFiles = dict((x[0], next(x[1])) for x in groupby(\n subFiles, lambda f: f.subgroup))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ReleaseFile:\n \"\"\"! Class representing a Released file on Nebula\n\n `name`: str\n Mod (or build) name,\n `url`: str\n Primary host URL,\n `group`: str\n Mod group string,\n `subgroup`: str\n Mod subgroup string,\n `mirrors`: List[str]\n List of URL's of FTP mirrors\n \"\"\"\n\n def __init__(self, name, url, group, subgroup=None, mirrors=None):\n if mirrors is None:\n mirrors = []\n self.mirrors = mirrors\n self.subgroup = subgroup\n self.group = group\n self.url = url\n self.name = name\n self.base_url = '/'.join(url.split('/')[0:-1]) + '/'\n self.filename = url.split('/')[-1]\n self.content_hashes = None\n self.hash = None\n self.size = 0\n\n def __repr__(self):\n return repr(self.name)\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n if file.subgroup == 'SSE2':\n self.mainFile = file\n else:\n subFiles.append(file)\n self.subFiles = dict((x[0], next(x[1])) for x in groupby(\n subFiles, lambda f: f.subgroup))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ReleaseFile:\n \"\"\"! Class representing a Released file on Nebula\n\n `name`: str\n Mod (or build) name,\n `url`: str\n Primary host URL,\n `group`: str\n Mod group string,\n `subgroup`: str\n Mod subgroup string,\n `mirrors`: List[str]\n List of URL's of FTP mirrors\n \"\"\"\n\n def __init__(self, name, url, group, subgroup=None, mirrors=None):\n if mirrors is None:\n mirrors = []\n self.mirrors = mirrors\n self.subgroup = subgroup\n self.group = group\n self.url = url\n self.name = name\n self.base_url = '/'.join(url.split('/')[0:-1]) + '/'\n self.filename = url.split('/')[-1]\n self.content_hashes = None\n self.hash = None\n self.size = 0\n\n def __repr__(self):\n return repr(self.name)\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n if file.subgroup == 'SSE2':\n self.mainFile = file\n else:\n subFiles.append(file)\n self.subFiles = dict((x[0], next(x[1])) for x in groupby(\n subFiles, lambda f: f.subgroup))\n\n\ndef get_release_files(tag_name, config) ->Tuple[List[ReleaseFile], Dict[str,\n SourceFile]]:\n \"\"\"! Brief Gets the binary and source files from the Github Release server\n\n @param[in] `tag_name` Git tag of the current release\n @param[in] `config` confi metadata set in main.py\n\n @returns `List[ReleaseFile]` List of release files\n @returns `Dict[str, SourceFile]` Dictionary of source files\n\n @details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not\n actually downloaded here, just their metadata is gathered and organized in their respective container for later\n use.\n \"\"\"\n\n @retry_multi(5)\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. 
To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {'Accept': 'application/vnd.github.v3+json'}\n url = 'https://api.github.com' + path\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n response.raise_for_status()\n return response.json()\n build_group_regex = re.compile('fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*')\n source_file_regex = re.compile('fs2_open_.*-source-([^.]*)?.*')\n response = execute_request('/repos/{}/releases/tags/{}'.format(config[\n 'github']['repo'], tag_name))\n binary_files = []\n source_files = {}\n for asset in response['assets']:\n url = asset['browser_download_url']\n name = asset['name']\n group_match = build_group_regex.match(name)\n if group_match is not None:\n platform = group_match.group(1)\n if platform == 'x64':\n platform = 'Win64'\n binary_files.append(ReleaseFile(name, url, platform,\n group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n if group_match is None:\n continue\n group = group_match.group(1)\n source_files[group] = SourceFile(name, url, group)\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n return binary_files, source_files\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ReleaseFile:\n \"\"\"! Class representing a Released file on Nebula\n\n `name`: str\n Mod (or build) name,\n `url`: str\n Primary host URL,\n `group`: str\n Mod group string,\n `subgroup`: str\n Mod subgroup string,\n `mirrors`: List[str]\n List of URL's of FTP mirrors\n \"\"\"\n\n def __init__(self, name, url, group, subgroup=None, mirrors=None):\n if mirrors is None:\n mirrors = []\n self.mirrors = mirrors\n self.subgroup = subgroup\n self.group = group\n self.url = url\n self.name = name\n self.base_url = '/'.join(url.split('/')[0:-1]) + '/'\n self.filename = url.split('/')[-1]\n self.content_hashes = None\n self.hash = None\n self.size = 0\n\n def __repr__(self):\n return repr(self.name)\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n if file.subgroup == 'SSE2':\n self.mainFile = file\n else:\n subFiles.append(file)\n self.subFiles = dict((x[0], next(x[1])) for x in groupby(\n subFiles, lambda f: f.subgroup))\n\n\ndef get_release_files(tag_name, config) ->Tuple[List[ReleaseFile], Dict[str,\n SourceFile]]:\n \"\"\"! Brief Gets the binary and source files from the Github Release server\n\n @param[in] `tag_name` Git tag of the current release\n @param[in] `config` confi metadata set in main.py\n\n @returns `List[ReleaseFile]` List of release files\n @returns `Dict[str, SourceFile]` Dictionary of source files\n\n @details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not\n actually downloaded here, just their metadata is gathered and organized in their respective container for later\n use.\n \"\"\"\n\n @retry_multi(5)\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. 
To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {'Accept': 'application/vnd.github.v3+json'}\n url = 'https://api.github.com' + path\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n response.raise_for_status()\n return response.json()\n build_group_regex = re.compile('fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*')\n source_file_regex = re.compile('fs2_open_.*-source-([^.]*)?.*')\n response = execute_request('/repos/{}/releases/tags/{}'.format(config[\n 'github']['repo'], tag_name))\n binary_files = []\n source_files = {}\n for asset in response['assets']:\n url = asset['browser_download_url']\n name = asset['name']\n group_match = build_group_regex.match(name)\n if group_match is not None:\n platform = group_match.group(1)\n if platform == 'x64':\n platform = 'Win64'\n binary_files.append(ReleaseFile(name, url, platform,\n group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n if group_match is None:\n continue\n group = group_match.group(1)\n source_files[group] = SourceFile(name, url, group)\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n return binary_files, source_files\n\n\ndef get_ftp_files(build_type, tag_name, config) ->List[ReleaseFile]:\n \"\"\"!\n @brief Gets file metadata for nightlies hosted on FTP, as determined by config[\"ftp\"] attributes\n \n @param [in] `build_type` Unknown str\n @param [in] `tag_name` Github tag name of the release\n @param [in] `config` config metadata set in main.py\n \"\"\"\n tag_regex = re.compile('nightly_(.*)')\n build_group_regex = re.compile('nightly_.*-builds-([^.]+).*')\n files = []\n try:\n with FTP(config['ftp']['host'], config['ftp']['user'], config['ftp'\n ]['pass']) as ftp:\n version_str = tag_regex.match(tag_name).group(1)\n path_template = config['ftp']['path']\n path = path_template.format(type=build_type, version=version_str)\n file_entries = list(ftp.mlsd(path, ['type']))\n for entry in file_entries:\n if entry[1]['type'] == 'file':\n files.append(entry[0])\n except error_perm:\n print('Received permanent FTP error!')\n return []\n out_data = []\n for file in files:\n file_match = build_group_regex.match(file)\n if file_match is None:\n print(\"Ignoring non nightly file '{}'\".format(file))\n continue\n group_match = file_match.group(1)\n primary_url = None\n mirrors = []\n if 'x64' in group_match:\n group_match = group_match.replace('x64', 'Win64')\n for mirror in config['ftp']['mirrors']:\n download_url = mirror.format(type=build_type, version=\n version_str, file=file)\n if primary_url is None:\n primary_url = download_url\n else:\n mirrors.append(download_url)\n out_data.append(ReleaseFile(file, primary_url, group_match, None,\n mirrors))\n return out_data\n",
"step-5": "import re # regex module\nfrom ftplib import FTP, error_perm\nfrom itertools import groupby\nfrom typing import List, Tuple, Dict\n\nimport requests # HTTP requests module\n\nfrom util import retry_multi, GLOBAL_TIMEOUT\t# from util.py\n\n\nclass ReleaseFile:\n \"\"\"! Class representing a Released file on Nebula\n\n `name`: str\n Mod (or build) name,\n `url`: str\n Primary host URL,\n `group`: str\n Mod group string,\n `subgroup`: str\n Mod subgroup string,\n `mirrors`: List[str]\n List of URL's of FTP mirrors\n \"\"\"\n def __init__(self, name, url, group, subgroup=None, mirrors=None):\n if mirrors is None:\n mirrors = []\n self.mirrors = mirrors\n self.subgroup = subgroup\n self.group = group\n self.url = url\n self.name = name\n\n self.base_url = \"/\".join(url.split('/')[0:-1]) + \"/\"\n self.filename = url.split('/')[-1]\n\n # A list of tuples of (filename, hash)\n self.content_hashes = None\n\n self.hash = None\n self.size = 0\n \n def __repr__(self):\n return repr((self.name))\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n # We only have subcategories for Windows where SSE2 is the main group\n if file.subgroup == \"SSE2\":\n self.mainFile = file\n else:\n subFiles.append(file)\n\n self.subFiles = dict(((x[0], next(x[1])) for x in groupby(subFiles, lambda f: f.subgroup)))\n\n\ndef get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]:\n \"\"\"! Brief Gets the binary and source files from the Github Release server\n\n @param[in] `tag_name` Git tag of the current release\n @param[in] `config` confi metadata set in main.py\n\n @returns `List[ReleaseFile]` List of release files\n @returns `Dict[str, SourceFile]` Dictionary of source files\n\n @details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not\n actually downloaded here, just their metadata is gathered and organized in their respective container for later\n use.\n \"\"\"\n\n @retry_multi(5)\t# retry at most 5 times\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. 
To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\"\n }\n url = \"https://api.github.com\" + path\n\n # GET https://api.github.com/<path> Accept: \"application/vnd.github.v3+json\"\n\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n\n response.raise_for_status() # Raise a RequestException if we failed, and trigger retry\n\n return response.json()\n\n build_group_regex = re.compile(\"fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*\") # regex for matching binary .zip's and .7z's\n source_file_regex = re.compile(\"fs2_open_.*-source-([^.]*)?.*\") # regex for matching source .zip's and .7z's\n\n # Get the github release metadata of the given tag name\n response = execute_request(\n \"/repos/{}/releases/tags/{}\".format(config[\"github\"][\"repo\"], tag_name))\n\n # Extract the binary and source files from the response[\"asset\"] metadata\n binary_files = []\n source_files = {}\n for asset in response[\"assets\"]:\n url = asset[\"browser_download_url\"]\n name = asset[\"name\"]\n\n group_match = build_group_regex.match(name)\n\n if group_match is not None:\n platform = group_match.group(1)\n # x64 is the Visual Studio name but for consistency we need Win64\n if platform == \"x64\":\n platform = \"Win64\"\n\n binary_files.append(ReleaseFile(name, url, platform, group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n\n if group_match is None:\n continue\n\n group = group_match.group(1)\n\n source_files[group] = SourceFile(name, url, group)\n\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n\n return binary_files, source_files\n\n\ndef get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] :\n \"\"\"!\n @brief Gets file metadata for nightlies hosted on FTP, as determined by config[\"ftp\"] attributes\n \n @param [in] `build_type` Unknown str\n @param [in] `tag_name` Github tag name of the release\n @param [in] `config` config metadata set in main.py\n \"\"\"\n\n tag_regex = re.compile(\"nightly_(.*)\")\n build_group_regex = re.compile(\"nightly_.*-builds-([^.]+).*\")\n\n files = []\n try:\n with FTP(config[\"ftp\"][\"host\"], config[\"ftp\"][\"user\"], config[\"ftp\"][\"pass\"]) as ftp:\n # extract version\n version_str = tag_regex.match(tag_name).group(1)\n\n # extract filepath w/ version\n # then list all ftp hits with that path\n path_template = config[\"ftp\"][\"path\"]\n path = path_template.format(type=build_type, version=version_str)\n file_entries = list(ftp.mlsd(path, [\"type\"]))\n\n # get all ftp hits of type file\n for entry in file_entries:\n if entry[1][\"type\"] == \"file\":\n files.append(entry[0])\n except error_perm:\n print(\"Received permanent FTP error!\")\n return []\n\n out_data = []\n for file in files:\n # from the file list, extract only nightly files\n file_match = build_group_regex.match(file)\n if file_match is None:\n print(\"Ignoring non nightly file '{}'\".format(file))\n continue\n\n group_match = file_match.group(1)\n primary_url = None\n mirrors = []\n\n # x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post\n if \"x64\" in group_match:\n group_match = group_match.replace(\"x64\", \"Win64\")\n\n # construct the download URL list for all mirrors. 
The first listed ftp location is taken as the Primary\n for mirror in config[\"ftp\"][\"mirrors\"]:\n download_url = mirror.format(type=build_type, version=version_str, file=file)\n if primary_url is None:\n primary_url = download_url\n else:\n mirrors.append(download_url)\n\n # Form the List[ReleaseFile] list with the download URL links\n out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors))\n\n return out_data",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
<|reserved_special_token_0|>
class Account:
def __init__(self, name, balance):
self.name = name
self.balance = balance
def deposit(self, money):
self.balance += money
return 'Deposit accepted'
def withdraw(self, moneytaken):
if self.balance < moneytaken:
return 'Funds Unavailable'
else:
self.balance -= moneytaken
return 'Withdraw Accepted'
def __str__(self):
return f'Account owner: {self.name}\nAccount balance: {self.balance}$'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Line:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def distance(self):
return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -
self.coor1[1]) ** 2) ** 0.5
def slope(self):
return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]
)
def __str__(self):
return (
f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'
)
<|reserved_special_token_0|>
class Cylinder:
pi = 3.14
def __init__(self, height=1, radius=1):
self.height = height
self.radius = radius
def volume(self):
return self.pi * self.radius ** 2 * self.height
def surface_area(self):
return 2 * self.pi * self.radius ** 2
def __str__(self):
return (
f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'
)
<|reserved_special_token_0|>
class Account:
def __init__(self, name, balance):
self.name = name
self.balance = balance
def deposit(self, money):
self.balance += money
return 'Deposit accepted'
def withdraw(self, moneytaken):
if self.balance < moneytaken:
return 'Funds Unavailable'
else:
self.balance -= moneytaken
return 'Withdraw Accepted'
def __str__(self):
return f'Account owner: {self.name}\nAccount balance: {self.balance}$'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Line:
def __init__(self, coor1, coor2):
self.coor1 = coor1
self.coor2 = coor2
<|reserved_special_token_0|>
def distance(self):
return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -
self.coor1[1]) ** 2) ** 0.5
def slope(self):
return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]
)
def __str__(self):
return (
f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'
)
<|reserved_special_token_0|>
class Cylinder:
pi = 3.14
def __init__(self, height=1, radius=1):
self.height = height
self.radius = radius
def volume(self):
return self.pi * self.radius ** 2 * self.height
def surface_area(self):
return 2 * self.pi * self.radius ** 2
def __str__(self):
return (
f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'
)
<|reserved_special_token_0|>
class Account:
def __init__(self, name, balance):
self.name = name
self.balance = balance
def deposit(self, money):
self.balance += money
return 'Deposit accepted'
def withdraw(self, moneytaken):
if self.balance < moneytaken:
return 'Funds Unavailable'
else:
self.balance -= moneytaken
return 'Withdraw Accepted'
def __str__(self):
return f'Account owner: {self.name}\nAccount balance: {self.balance}$'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Line:
def __init__(self, coor1, coor2):
self.coor1 = coor1
self.coor2 = coor2
def slope(self):
pass
def distance(self):
return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -
self.coor1[1]) ** 2) ** 0.5
def slope(self):
return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]
)
def __str__(self):
return (
f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'
)
line1 = Line((3, 4), (5, 6))
li = Line((3, 2), (8, 10))
print(li.distance())
print(line1.coor1[0])
print(line1.distance())
print(li)
class Cylinder:
pi = 3.14
def __init__(self, height=1, radius=1):
self.height = height
self.radius = radius
def volume(self):
return self.pi * self.radius ** 2 * self.height
def surface_area(self):
return 2 * self.pi * self.radius ** 2
def __str__(self):
return (
f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'
)
c = Cylinder(2, 3)
print(c)
class Account:
def __init__(self, name, balance):
self.name = name
self.balance = balance
def deposit(self, money):
self.balance += money
return 'Deposit accepted'
def withdraw(self, moneytaken):
if self.balance < moneytaken:
return 'Funds Unavailable'
else:
self.balance -= moneytaken
return 'Withdraw Accepted'
def __str__(self):
return f'Account owner: {self.name}\nAccount balance: {self.balance}$'
acct1 = Account('jose', 100)
print(acct1)
print(acct1.withdraw(1000))
print(acct1.balance)
print(acct1.deposit(101))
print(acct1.balance)
<|reserved_special_token_1|>
class Line:
def __init__(self,coor1,coor2):
self.coor1 = coor1
self.coor2 = coor2
def slope(self):
pass
def distance(self):
#x = self.coor1[0]-self.coor2[0]
#y = self.coor2[1]-self.coor2[1]
#return ((x**2)+(y**2))**0.5
return ((((self.coor2[0]-self.coor1[0])**2)+((self.coor2[1]-self.coor1[1])**2))**0.5)
def slope(self):
return (self.coor2[1]-self.coor1[1])/(self.coor2[0]-self.coor1[0])
def __str__(self):
return f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'
line1 = Line((3,4),(5,6))
li = Line((3,2),(8,10))
print(li.distance())
print(line1.coor1[0])
print(line1.distance())
print(li)
class Cylinder:
pi = 3.14
def __init__(self,height=1,radius=1):
self.height = height
self.radius = radius
def volume(self):
return self.pi*self.radius**2*self.height
def surface_area(self):
return 2*self.pi*self.radius**2
def __str__(self):
return f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'
c = Cylinder(2,3)
print(c)
class Account:
def __init__(self,name,balance):
self.name=name
self.balance=balance
def deposit(self,money):
self.balance += money
return 'Deposit accepted'
def withdraw(self,moneytaken):
if self.balance < moneytaken:
return 'Funds Unavailable'
else:
self.balance -= moneytaken
return 'Withdraw Accepted'
def __str__(self):
return f'Account owner: {self.name}\nAccount balance: {self.balance}$'
acct1 = Account('jose',100)
print(acct1)
print(acct1.withdraw(1000))
print(acct1.balance)
print(acct1.deposit(101))
print(acct1.balance)
|
flexible
|
{
"blob_id": "f91e997b305348485698d180b97138b040285b60",
"index": 9440,
"step-1": "<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-2": "class Line:\n <mask token>\n <mask token>\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\n<mask token>\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\n<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-3": "class Line:\n\n def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2\n <mask token>\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\n<mask token>\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\n<mask token>\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\n<mask token>\n",
"step-4": "class Line:\n\n def __init__(self, coor1, coor2):\n self.coor1 = coor1\n self.coor2 = coor2\n\n def slope(self):\n pass\n\n def distance(self):\n return ((self.coor2[0] - self.coor1[0]) ** 2 + (self.coor2[1] -\n self.coor1[1]) ** 2) ** 0.5\n\n def slope(self):\n return (self.coor2[1] - self.coor1[1]) / (self.coor2[0] - self.coor1[0]\n )\n\n def __str__(self):\n return (\n f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n )\n\n\nline1 = Line((3, 4), (5, 6))\nli = Line((3, 2), (8, 10))\nprint(li.distance())\nprint(line1.coor1[0])\nprint(line1.distance())\nprint(li)\n\n\nclass Cylinder:\n pi = 3.14\n\n def __init__(self, height=1, radius=1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n return self.pi * self.radius ** 2 * self.height\n\n def surface_area(self):\n return 2 * self.pi * self.radius ** 2\n\n def __str__(self):\n return (\n f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n )\n\n\nc = Cylinder(2, 3)\nprint(c)\n\n\nclass Account:\n\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n\n def deposit(self, money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self, moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable'\n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n\n\nacct1 = Account('jose', 100)\nprint(acct1)\nprint(acct1.withdraw(1000))\nprint(acct1.balance)\nprint(acct1.deposit(101))\nprint(acct1.balance)\n",
"step-5": "class Line:\n def __init__(self,coor1,coor2):\n self.coor1 = coor1\n self.coor2 = coor2 \n \n def slope(self):\n pass\n\n def distance(self):\n #x = self.coor1[0]-self.coor2[0]\n #y = self.coor2[1]-self.coor2[1]\n #return ((x**2)+(y**2))**0.5\n return ((((self.coor2[0]-self.coor1[0])**2)+((self.coor2[1]-self.coor1[1])**2))**0.5)\n\n def slope(self):\n return (self.coor2[1]-self.coor1[1])/(self.coor2[0]-self.coor1[0])\n\n\n\n def __str__(self):\n return f'The distance between A and B is: {self.distance()} and the slope is{self.slope()}'\n\n\nline1 = Line((3,4),(5,6))\nli = Line((3,2),(8,10))\nprint(li.distance())\nprint(line1.coor1[0])\nprint(line1.distance())\nprint(li)\n\nclass Cylinder:\n\n pi = 3.14\n \n def __init__(self,height=1,radius=1):\n self.height = height\n self.radius = radius\n\n\n def volume(self):\n return self.pi*self.radius**2*self.height\n\n def surface_area(self):\n return 2*self.pi*self.radius**2\n\n def __str__(self):\n return f'The Volume is {self.volume()} and the surface_area is {self.surface_area()}'\n\nc = Cylinder(2,3)\n\nprint(c)\n\nclass Account: \n def __init__(self,name,balance):\n self.name=name\n self.balance=balance\n\n def deposit(self,money):\n self.balance += money\n return 'Deposit accepted'\n\n def withdraw(self,moneytaken):\n if self.balance < moneytaken:\n return 'Funds Unavailable' \n else:\n self.balance -= moneytaken\n return 'Withdraw Accepted'\n\n def __str__(self):\n return f'Account owner: {self.name}\\nAccount balance: {self.balance}$'\n \n \n\n \n\nacct1 = Account('jose',100)\n\n\n\nprint(acct1)\nprint(acct1.withdraw(1000))\nprint(acct1.balance)\nprint(acct1.deposit(101))\nprint(acct1.balance)\n",
"step-ids": [
5,
15,
16,
19,
20
]
}
|
[
5,
15,
16,
19,
20
] |
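The Account class in the row above signals failure by returning the string 'Funds Unavailable', which a caller can easily mistake for success. A minimal sketch of the same guard-clause pattern using an exception instead (the SafeAccount and InsufficientFunds names are illustrative, not part of the dataset row):

class InsufficientFunds(Exception):
    """Raised when a withdrawal exceeds the available balance."""


class SafeAccount:
    def __init__(self, name, balance=0):
        self.name = name
        self.balance = balance

    def withdraw(self, amount):
        # Same balance check as Account.withdraw above, but failure
        # raises rather than returning a status string.
        if amount > self.balance:
            raise InsufficientFunds(f'requested {amount}, have {self.balance}')
        self.balance -= amount
        return self.balance


acct = SafeAccount('jose', 100)
try:
    acct.withdraw(1000)
except InsufficientFunds as err:
    print('Withdrawal refused:', err)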
# from models import dist_model
# model = dist_model.DistModel()
from os.path import join
import models
import util.util as util
import matplotlib.pylab as plt
use_gpu = True
fig_outdir = r"C:\Users\ponce\OneDrive - Washington University in St. Louis\ImageDiffMetric"
#%%
net_name = 'squeeze'
SpatialDist = models.PerceptualLoss(model='net-lin', net=net_name, colorspace='rgb', spatial=True, use_gpu=True, gpu_ids=[0])
PerceptLoss = models.PerceptualLoss(model='net-lin', net=net_name, colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0])
#%%
imgdir = r"\\storage1.ris.wustl.edu\crponce\Active\Stimuli\2019-06-Evolutions\beto-191212a\backup_12_12_2019_10_47_39"
file0 = "block048_thread000_gen_gen047_001896.jpg"
file1 = "block048_thread000_gen_gen047_001900.jpg"
img0_ = util.load_image(join(imgdir,file0))
img1_ = util.load_image(join(imgdir,file1))
img0 = util.im2tensor(img0_) # RGB image from [-1,1]
if(use_gpu):
img0 = img0.cuda()
img1 = util.im2tensor(img1_)
if(use_gpu):
img1 = img1.cuda()
#%
# Compute distance
dist01 = SpatialDist.forward(img0,img1)#.item()
dist_sum = PerceptLoss.forward(img0,img1).item()
# dists.append(dist01)
# print('(%s, %s): %.3f'%(file0,file1,dist01))
# f.writelines('(%s, %s): %.3f'%(file0,file1,dist01))
# %
plt.figure(figsize=[9,3.5])
plt.subplot(131)
plt.imshow(img0_)
plt.subplot(132)
plt.imshow(img1_)
plt.subplot(133)
plt.pcolor(dist01.cpu().detach().squeeze())
plt.axis('image')
plt.gca().invert_yaxis()
plt.title("Dist %.2f"%dist_sum)
plt.savefig(join(fig_outdir,"Diff1212_1896_1900_%s.png" % net_name))
plt.show()
|
normal
|
{
"blob_id": "8fcbaf2663c22015a0c47f00c2d4fb8db6a5c308",
"index": 6209,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif use_gpu:\n img0 = img0.cuda()\n<mask token>\nif use_gpu:\n img1 = img1.cuda()\n<mask token>\nplt.figure(figsize=[9, 3.5])\nplt.subplot(131)\nplt.imshow(img0_)\nplt.subplot(132)\nplt.imshow(img1_)\nplt.subplot(133)\nplt.pcolor(dist01.cpu().detach().squeeze())\nplt.axis('image')\nplt.gca().invert_yaxis()\nplt.title('Dist %.2f' % dist_sum)\nplt.savefig(join(fig_outdir, 'Diff1212_1896_1900_%s.png' % net_name))\nplt.show()\n",
"step-3": "<mask token>\nuse_gpu = True\nfig_outdir = (\n 'C:\\\\Users\\\\ponce\\\\OneDrive - Washington University in St. Louis\\\\ImageDiffMetric'\n )\nnet_name = 'squeeze'\nSpatialDist = models.PerceptualLoss(model='net-lin', net=net_name,\n colorspace='rgb', spatial=True, use_gpu=True, gpu_ids=[0])\nPerceptLoss = models.PerceptualLoss(model='net-lin', net=net_name,\n colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0])\nimgdir = (\n '\\\\\\\\storage1.ris.wustl.edu\\\\crponce\\\\Active\\\\Stimuli\\\\2019-06-Evolutions\\\\beto-191212a\\\\backup_12_12_2019_10_47_39'\n )\nfile0 = 'block048_thread000_gen_gen047_001896.jpg'\nfile1 = 'block048_thread000_gen_gen047_001900.jpg'\nimg0_ = util.load_image(join(imgdir, file0))\nimg1_ = util.load_image(join(imgdir, file1))\nimg0 = util.im2tensor(img0_)\nif use_gpu:\n img0 = img0.cuda()\nimg1 = util.im2tensor(img1_)\nif use_gpu:\n img1 = img1.cuda()\ndist01 = SpatialDist.forward(img0, img1)\ndist_sum = PerceptLoss.forward(img0, img1).item()\nplt.figure(figsize=[9, 3.5])\nplt.subplot(131)\nplt.imshow(img0_)\nplt.subplot(132)\nplt.imshow(img1_)\nplt.subplot(133)\nplt.pcolor(dist01.cpu().detach().squeeze())\nplt.axis('image')\nplt.gca().invert_yaxis()\nplt.title('Dist %.2f' % dist_sum)\nplt.savefig(join(fig_outdir, 'Diff1212_1896_1900_%s.png' % net_name))\nplt.show()\n",
"step-4": "from os.path import join\nimport models\nimport util.util as util\nimport matplotlib.pylab as plt\nuse_gpu = True\nfig_outdir = (\n 'C:\\\\Users\\\\ponce\\\\OneDrive - Washington University in St. Louis\\\\ImageDiffMetric'\n )\nnet_name = 'squeeze'\nSpatialDist = models.PerceptualLoss(model='net-lin', net=net_name,\n colorspace='rgb', spatial=True, use_gpu=True, gpu_ids=[0])\nPerceptLoss = models.PerceptualLoss(model='net-lin', net=net_name,\n colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0])\nimgdir = (\n '\\\\\\\\storage1.ris.wustl.edu\\\\crponce\\\\Active\\\\Stimuli\\\\2019-06-Evolutions\\\\beto-191212a\\\\backup_12_12_2019_10_47_39'\n )\nfile0 = 'block048_thread000_gen_gen047_001896.jpg'\nfile1 = 'block048_thread000_gen_gen047_001900.jpg'\nimg0_ = util.load_image(join(imgdir, file0))\nimg1_ = util.load_image(join(imgdir, file1))\nimg0 = util.im2tensor(img0_)\nif use_gpu:\n img0 = img0.cuda()\nimg1 = util.im2tensor(img1_)\nif use_gpu:\n img1 = img1.cuda()\ndist01 = SpatialDist.forward(img0, img1)\ndist_sum = PerceptLoss.forward(img0, img1).item()\nplt.figure(figsize=[9, 3.5])\nplt.subplot(131)\nplt.imshow(img0_)\nplt.subplot(132)\nplt.imshow(img1_)\nplt.subplot(133)\nplt.pcolor(dist01.cpu().detach().squeeze())\nplt.axis('image')\nplt.gca().invert_yaxis()\nplt.title('Dist %.2f' % dist_sum)\nplt.savefig(join(fig_outdir, 'Diff1212_1896_1900_%s.png' % net_name))\nplt.show()\n",
"step-5": "# from models import dist_model\n# model = dist_model.DistModel()\nfrom os.path import join\nimport models\nimport util.util as util\nimport matplotlib.pylab as plt\nuse_gpu = True\nfig_outdir = r\"C:\\Users\\ponce\\OneDrive - Washington University in St. Louis\\ImageDiffMetric\"\n#%%\nnet_name = 'squeeze'\nSpatialDist = models.PerceptualLoss(model='net-lin', net=net_name, colorspace='rgb', spatial=True, use_gpu=True, gpu_ids=[0])\nPerceptLoss = models.PerceptualLoss(model='net-lin', net=net_name, colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0])\n#%%\nimgdir = r\"\\\\storage1.ris.wustl.edu\\crponce\\Active\\Stimuli\\2019-06-Evolutions\\beto-191212a\\backup_12_12_2019_10_47_39\"\nfile0 = \"block048_thread000_gen_gen047_001896.jpg\"\nfile1 = \"block048_thread000_gen_gen047_001900.jpg\"\nimg0_ = util.load_image(join(imgdir,file0))\nimg1_ = util.load_image(join(imgdir,file1))\nimg0 = util.im2tensor(img0_) # RGB image from [-1,1]\nif(use_gpu):\n img0 = img0.cuda()\nimg1 = util.im2tensor(img1_)\nif(use_gpu):\n img1 = img1.cuda()\n#%\n# Compute distance\ndist01 = SpatialDist.forward(img0,img1)#.item()\ndist_sum = PerceptLoss.forward(img0,img1).item()\n# dists.append(dist01)\n# print('(%s, %s): %.3f'%(file0,file1,dist01))\n# f.writelines('(%s, %s): %.3f'%(file0,file1,dist01))\n# %\nplt.figure(figsize=[9,3.5])\nplt.subplot(131)\nplt.imshow(img0_)\nplt.subplot(132)\nplt.imshow(img1_)\nplt.subplot(133)\nplt.pcolor(dist01.cpu().detach().squeeze())\nplt.axis('image')\nplt.gca().invert_yaxis()\nplt.title(\"Dist %.2f\"%dist_sum)\nplt.savefig(join(fig_outdir,\"Diff1212_1896_1900_%s.png\" % net_name))\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
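The script in the row above measures image similarity twice: once with spatial=True for a per-pixel LPIPS map that it plots with pcolor, and once with spatial=False for the scalar distance shown in the figure title. A minimal sketch of the same pair of measurements, assuming the pip-installable lpips package (the packaged form of the PerceptualSimilarity models used above); the random tensors stand in for the evolved-stimulus images, which are not available here.

import torch
import lpips  # pip install lpips

spatial_fn = lpips.LPIPS(net='squeeze', spatial=True)    # per-pixel map
scalar_fn = lpips.LPIPS(net='squeeze', spatial=False)    # single distance

# LPIPS expects NCHW RGB tensors scaled to [-1, 1]
img0 = torch.rand(1, 3, 64, 64) * 2 - 1
img1 = torch.rand(1, 3, 64, 64) * 2 - 1

dist_map = spatial_fn(img0, img1)        # shape (1, 1, 64, 64)
dist_sum = scalar_fn(img0, img1).item()  # scalar summary
print(dist_map.shape, dist_sum)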
from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider
class Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):
name = "pharmacy_4_less_au"
item_attributes = {"brand": "Pharmacy 4 Less", "brand_wikidata": "Q63367608"}
key = "6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX"
|
normal
|
{
"blob_id": "aad3c104432a1a028d96263236133e495536ee69",
"index": 6644,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = 'pharmacy_4_less_au'\n item_attributes = {'brand': 'Pharmacy 4 Less', 'brand_wikidata':\n 'Q63367608'}\n key = '6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX'\n",
"step-4": "from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = 'pharmacy_4_less_au'\n item_attributes = {'brand': 'Pharmacy 4 Less', 'brand_wikidata':\n 'Q63367608'}\n key = '6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX'\n",
"step-5": "from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = \"pharmacy_4_less_au\"\n item_attributes = {\"brand\": \"Pharmacy 4 Less\", \"brand_wikidata\": \"Q63367608\"}\n key = \"6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
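The spider in the row above contributes only configuration: a spider name, brand metadata, and the StoreLocatorWidgets API key; the crawling logic lives entirely in the StoreLocatorWidgetsSpider base class. A minimal sketch of the same declare-only pattern in plain Scrapy (2.2+ for response.json()), for readers without the alltheplaces locations package; the URL and field names are placeholders.

import scrapy


class ExampleStoreSpider(scrapy.Spider):
    # Configuration-only subclass: the base class owns the crawl,
    # exactly as StoreLocatorWidgetsSpider does in the row above.
    name = 'example_store'
    start_urls = ['https://example.com/api/stores.json']

    def parse(self, response):
        for store in response.json().get('stores', []):
            yield {
                'ref': store.get('id'),
                'lat': store.get('latitude'),
                'lon': store.get('longitude'),
            }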
def get_perms(string):
toRtn = []
freq_table = count_letters(string)
get_perms_helper(freq_table, "", len(string), toRtn)
return toRtn
def count_letters(string):
freq = {}
for letter in string:
if letter not in freq:
freq[letter] = 0
freq[letter] += 1
return freq
def get_perms_helper(freq_table, prefix, remaining, result):
if remaining == 0:
result.append(prefix)
return
for letter in freq_table:
count = freq_table[letter]
if count > 0:
freq_table[letter] -= 1
get_perms_helper(freq_table, prefix + letter, remaining - 1, result)
freq_table[letter] = count
print get_perms("aaab")
|
normal
|
{
"blob_id": "719a993e1f5c5d1e803b04a5561373f2b9a5a5c2",
"index": 8524,
"step-1": "def get_perms(string):\n toRtn = []\n freq_table = count_letters(string)\n get_perms_helper(freq_table, \"\", len(string), toRtn)\n return toRtn\n\ndef count_letters(string):\n freq = {}\n for letter in string:\n if letter not in freq:\n freq[letter] = 0\n freq[letter] += 1\n return freq\n\ndef get_perms_helper(freq_table, prefix, remaining, result):\n if remaining == 0:\n result.append(prefix)\n return\n \n for letter in freq_table:\n count = freq_table[letter]\n if count > 0:\n freq_table[letter] -= 1\n get_perms_helper(freq_table, prefix + letter, remaining - 1, result)\n freq_table[letter] = count\n\nprint get_perms(\"aaab\")",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
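get_perms in the row above avoids emitting duplicate permutations of repeated letters: it decrements a letter's count in the frequency table before recursing and restores it afterwards, so 'aaab' yields exactly 4!/3! = 4 results instead of 24 with repeats (the final line is Python 2's print statement). A brute-force cross-check with itertools, which generates every ordering and deduplicates after the fact:

from itertools import permutations


def get_perms_brute(string):
    # Generates all len(string)! orderings and deduplicates with a set;
    # the frequency-table version above never produces a duplicate.
    return sorted({''.join(p) for p in permutations(string)})


assert get_perms_brute('aaab') == ['aaab', 'aaba', 'abaa', 'baaa']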
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
xp, yp = set(), set()
veneer = []
W, H = map(int, input().split())
if not W:
break
N = int(input())
for i in range(N):
x1, y1, x2, y2 = map(int, input().split())
veneer.append((x1, y1, x2, y2))
xp.add(x1)
xp.add(x2)
yp.add(y1)
yp.add(y2)
xp = list(xp)
yp = list(yp)
wa = [[(0) for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]
print()
for v in veneer:
xi1 = bisect(xp, v[0])
xi2 = bisect(xp, v[1])
yi1 = bisect(yp, v[2])
yi2 = bisect(yp, v[3])
print(xi1, yi1, xi2, yi2)
wa[yi1][xi1] += 1
wa[yi2 + 1][xi1] -= 1
wa[yi1][xi2 + 1] -= 1
mem = [[(0) for x in xp] for y in yp]
for y, _ in enumerate(yp):
for x, _ in enumerate(xp):
mem[y][x] += wa[y][x]
if y > 0:
mem[y][x] += mem[y - 1][x]
if x > 0:
mem[y][x] += mem[y][x - 1]
print(wa[y])
<|reserved_special_token_1|>
from bisect import bisect_left as bisect
while True:
xp, yp = set(), set()
veneer = []
W, H = map(int, input().split())
if not W:
break
N = int(input())
for i in range(N):
x1, y1, x2, y2 = map(int, input().split())
veneer.append((x1, y1, x2, y2))
xp.add(x1)
xp.add(x2)
yp.add(y1)
yp.add(y2)
xp = list(xp)
yp = list(yp)
wa = [[(0) for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]
print()
for v in veneer:
xi1 = bisect(xp, v[0])
xi2 = bisect(xp, v[1])
yi1 = bisect(yp, v[2])
yi2 = bisect(yp, v[3])
print(xi1, yi1, xi2, yi2)
wa[yi1][xi1] += 1
wa[yi2 + 1][xi1] -= 1
wa[yi1][xi2 + 1] -= 1
mem = [[(0) for x in xp] for y in yp]
for y, _ in enumerate(yp):
for x, _ in enumerate(xp):
mem[y][x] += wa[y][x]
if y > 0:
mem[y][x] += mem[y - 1][x]
if x > 0:
mem[y][x] += mem[y][x - 1]
print(wa[y])
<|reserved_special_token_1|>
from bisect import bisect_left as bisect
while True:
xp, yp = set(), set()
veneer = []
W, H = map(int, input().split())
if not W:
break
N = int(input())
for i in range(N):
x1, y1, x2, y2 = map(int, input().split())
veneer.append((x1, y1, x2, y2))
xp.add(x1)
xp.add(x2)
yp.add(y1)
yp.add(y2)
xp = list(xp)
yp = list(yp)
wa = [[0 for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]
print()
for v in veneer:
xi1 = bisect(xp, v[0])
xi2 = bisect(xp, v[1])
yi1 = bisect(yp, v[2])
yi2 = bisect(yp, v[3])
print(xi1, yi1, xi2, yi2)
wa[yi1][xi1] += 1
wa[yi2 + 1][xi1] -=1
wa[yi1][xi2 + 1] -=1
mem = [[0 for x in xp] for y in yp]
for y, _ in enumerate(yp):
for x, _ in enumerate(xp):
mem[y][x] += wa[y][x]
if y > 0:
mem[y][x] += mem[y - 1][x]
if x > 0:
mem[y][x] += mem[y][x - 1]
print(wa[y])
|
flexible
|
{
"blob_id": "e0fbb5ad6d822230865e34c1216b355f700e5cec",
"index": 7822,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[(0) for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -= 1\n wa[yi1][xi2 + 1] -= 1\n mem = [[(0) for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n",
"step-3": "from bisect import bisect_left as bisect\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[(0) for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -= 1\n wa[yi1][xi2 + 1] -= 1\n mem = [[(0) for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n",
"step-4": "from bisect import bisect_left as bisect\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[0 for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -=1\n wa[yi1][xi2 + 1] -=1\n mem = [[0 for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
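The snippet in the row above compresses rectangle coordinates with bisect_left and accumulates a two-dimensional difference array, but two details look off: xi2 and yi1 are taken from swapped tuple slots (v[1] and v[2] are y1 and x2 in the (x1, y1, x2, y2) tuples), and the canonical four-corner update is missing its wa[yi2 + 1][xi2 + 1] += 1 term. A minimal self-contained sketch of the standard 2D difference-array ("imos") technique with all four corners, on half-open pixel rectangles rather than compressed coordinates:

def coverage_counts(rects, W, H):
    # Four-corner difference update per rectangle, then prefix sums
    # along rows and columns recover the per-cell coverage count.
    diff = [[0] * (W + 1) for _ in range(H + 1)]
    for x1, y1, x2, y2 in rects:        # half-open: [x1, x2) x [y1, y2)
        diff[y1][x1] += 1
        diff[y2][x1] -= 1
        diff[y1][x2] -= 1
        diff[y2][x2] += 1               # the corner the snippet omits
    for y in range(H):                  # prefix-sum each row ...
        for x in range(1, W):
            diff[y][x] += diff[y][x - 1]
    for y in range(1, H):               # ... then each column
        for x in range(W):
            diff[y][x] += diff[y - 1][x]
    return [row[:W] for row in diff[:H]]


grid = coverage_counts([(0, 0, 2, 2), (1, 1, 3, 3)], 4, 4)
assert grid[1][1] == 2   # covered by both rectangles
assert grid[0][0] == 1   # covered by the first only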
#!/usr/bin/env python
import os
import tempfile
import shutil
import math
import sys
import subprocess
from irank.config import IrankOptionParser, IrankApp
from irank import db as irank_db
STATUS = 0
def main():
p = IrankOptionParser('%prog -d DEST playlist_name [playlist_name ...]')
p.add_option('-d', '--dest', help='export destination', default=None)
p.add_option('-l', '--limit', type="int", help='per-playlist filesize limit', default=None)
p.add_option('--no-checksum', dest='checksum', action="store_false", default=True)
p.add_option('-i', '--interactive', action='store_true', help='Interactively resolve errors')
p.add_option('--rsync-opt', dest='rsync_opts', action='append', default=[], help='Add rsync option (can be used multiple times)')
opts, args = p.parse_args()
assert opts.dest, p.get_usage()
assert len(args) > 0, p.get_usage()
app = IrankApp(opts)
music_base = os.path.expanduser(opts.music)
irank_base = os.path.expanduser(opts.irank)
export_base = os.path.expanduser(opts.dest)
export_music = export_base # Used to be __music, but android 4+ doesn't like sub-folders
songs = {}
all_songs = set()
# we use hard-links, so the export_temp must be on the same device as our music!
# export_temp = tempfile.mkdtemp(prefix='irank-export-')
export_temp = os.path.join(irank_base, "__export_temp")
if os.path.exists(export_temp):
shutil.rmtree(export_temp)
else:
os.makedirs(export_temp)
shutil.copy(
os.path.join(irank_base, "irank.sqlite"),
os.path.join(export_temp, "irank.sqlite")
)
try:
for playlist in args:
playlist_songs = set(app.songs_for(playlist, relative=True))
songs[playlist] = playlist_songs
all_songs.update(playlist_songs)
write_m3u(export_temp, playlist, sorted(playlist_songs))
print "Generated playlist %s: %s files" % (playlist, len(playlist_songs))
print "linking into %r ..." % (export_temp,)
total_size = link_all_files(all_songs, export_temp=export_temp, music_base=music_base, limit=opts.limit)
print "Syncing %s files (%0.2fgb)" % (len(all_songs),total_size / (math.pow(1000, 3)))
extra_sync_opts = []
syncing = True
while syncing:
try:
sync(export_temp, export_music, additional_opts=opts.rsync_opts + extra_sync_opts, checksum=opts.checksum)
break
except (subprocess.CalledProcessError, OSError) as e:
if not opts.interactive:
raise
print >> sys.stderr, "Error syncing: %s\n" % (e,)
while True:
print >> sys.stderr, "Press Ctrl-C to abort, <return> to restart, 'k' to retry (skipping existing files) and 's' to skip to next step"
result = raw_input().strip().lower()
if result == 'k':
extra_sync_opts = ['--ignore-existing']
break
elif result == '':
extra_sync_opts = []
break
elif result == 's':
syncing = False
break
else:
print >> sys.stderr, "Eh?"
finally:
shutil.rmtree(export_temp)
def link_all_files(all_songs, export_temp, music_base, limit=None):
total_size = 0
def file_size(path):
try:
return os.stat(path).st_size
except OSError:
print >> sys.stderr, "couldn't get file size of file: %s" % (path,)
return None
for file in all_songs:
#if not os.path.isdir(os.path.dirname(
src_file = os.path.join(music_base, file)
src_file_size = file_size(src_file)
if src_file_size is None:
continue
if limit and (total_size + src_file_size) > limit:
return total_size
else:
total_size += src_file_size
link_dest = os.path.join(export_temp, file)
link_dest_dir = os.path.dirname(link_dest)
if not os.path.isdir(link_dest_dir):
os.makedirs(link_dest_dir)
os.link(src_file, link_dest)
return total_size
def sync(src, dest, additional_opts=[], checksum=True):
cmd = [
'rsync',
#'-n',
'--progress',
'--modify-window=5',
'-r',
#'-v',
'--delete-before']
if checksum:
cmd.append('-c')
cmd = cmd + additional_opts + [src + os.path.sep, dest]
print "running: %r" % (cmd,)
subprocess.check_call(cmd, stdin=subprocess.PIPE)
def write_m3u(dest, name, files):
global STATUS
encoding = sys.getfilesystemencoding()
with open(os.path.join(dest, name + '.m3u'), 'w') as output:
for name in files:
try:
print >> output, name.encode(encoding)
except (UnicodeEncodeError, UnicodeDecodeError) as err:
print "FAILED to write song: %r" % (name,)
STATUS = 1
if __name__ == '__main__':
main()
sys.exit(STATUS)
|
normal
|
{
"blob_id": "df64d769ffba8cddac34282a526122e3c941249d",
"index": 245,
"step-1": "#!/usr/bin/env python\nimport os\nimport tempfile\nimport shutil\nimport math\nimport sys\nimport subprocess\n\nfrom irank.config import IrankOptionParser, IrankApp\nfrom irank import db as irank_db\nSTATUS = 0\n\ndef main():\n\tp = IrankOptionParser('%prog -d DEST playlist_name [playlist_name ...]')\n\tp.add_option('-d', '--dest', help='export destination', default=None)\n\tp.add_option('-l', '--limit', type=\"int\", help='per-playlist filesize limit', default=None)\n\tp.add_option('--no-checksum', dest='checksum', action=\"store_false\", default=True)\n\tp.add_option('-i', '--interactive', action='store_true', help='Interactively resolve errors')\n\tp.add_option('--rsync-opt', dest='rsync_opts', action='append', default=[], help='Add rsync option (can be used multiple times)')\n\topts, args = p.parse_args()\n\tassert opts.dest, p.get_usage()\n\tassert len(args) > 0, p.get_usage()\n\tapp = IrankApp(opts)\n\n\tmusic_base = os.path.expanduser(opts.music)\n\tirank_base = os.path.expanduser(opts.irank)\n\texport_base = os.path.expanduser(opts.dest)\n\texport_music = export_base # Used to be __music, but android 4+ doesn't like sub-folders\n\tsongs = {}\n\tall_songs = set()\n\n\t# we use hard-links, so the export_temp must be on the same device as our music!\n\t# export_temp = tempfile.mkdtemp(prefix='irank-export-')\n\texport_temp = os.path.join(irank_base, \"__export_temp\")\n\tif os.path.exists(export_temp):\n\t\tshutil.rmtree(export_temp)\n\telse:\n\t\tos.makedirs(export_temp)\n\t\n\tshutil.copy(\n\t\tos.path.join(irank_base, \"irank.sqlite\"),\n\t\tos.path.join(export_temp, \"irank.sqlite\")\n\t)\n\n\ttry:\n\t\tfor playlist in args:\n\t\t\tplaylist_songs = set(app.songs_for(playlist, relative=True))\n\t\t\tsongs[playlist] = playlist_songs\n\t\t\tall_songs.update(playlist_songs)\n\t\t\twrite_m3u(export_temp, playlist, sorted(playlist_songs))\n\t\t\tprint \"Generated playlist %s: %s files\" % (playlist, len(playlist_songs))\n\n\t\tprint \"linking into %r ...\" % (export_temp,)\n\t\ttotal_size = link_all_files(all_songs, export_temp=export_temp, music_base=music_base, limit=opts.limit)\n\n\t\tprint \"Syncing %s files (%0.2fgb)\" % (len(all_songs),total_size / (math.pow(1000, 3)))\n\t\textra_sync_opts = []\n\t\tsyncing = True\n\t\twhile syncing:\n\t\t\ttry:\n\t\t\t\tsync(export_temp, export_music, additional_opts=opts.rsync_opts + extra_sync_opts, checksum=opts.checksum)\n\t\t\t\tbreak\n\t\t\texcept (subprocess.CalledProcessError, OSError) as e:\n\t\t\t\tif not opts.interactive:\n\t\t\t\t\traise\n\t\t\t\tprint >> sys.stderr, \"Error syncing: %s\\n\" % (e,)\n\t\t\t\twhile True:\n\t\t\t\t\tprint >> sys.stderr, \"Press Ctrl-C to abort, <return> to restart, 'k' to retry (skipping existing files) and 's' to skip to next step\"\n\t\t\t\t\tresult = raw_input().strip().lower()\n\t\t\t\t\tif result == 'k':\n\t\t\t\t\t\textra_sync_opts = ['--ignore-existing']\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif result == '':\n\t\t\t\t\t\textra_sync_opts = []\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif result == 's':\n\t\t\t\t\t\tsyncing = False\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint >> sys.stderr, \"Eh?\"\n\tfinally:\n\t\tshutil.rmtree(export_temp)\n\ndef link_all_files(all_songs, export_temp, music_base, limit=None):\n\ttotal_size = 0\n\tdef file_size(path):\n\t\ttry:\n\t\t\treturn os.stat(path).st_size\n\t\texcept OSError:\n\t\t\tprint >> sys.stderr, \"couldn't get file size of file: %s\" % (path,)\n\t\treturn None\n\n\tfor file in all_songs:\n\t\t#if not os.path.isdir(os.path.dirname(\n\t\tsrc_file 
= os.path.join(music_base, file)\n\t\tsrc_file_size = file_size(src_file)\n\t\tif src_file_size is None:\n\t\t\tcontinue\n\t\tif limit and (total_size + src_file_size) > limit:\n\t\t\treturn total_size\n\t\telse:\n\t\t\ttotal_size += src_file_size\n\n\t\tlink_dest = os.path.join(export_temp, file)\n\t\tlink_dest_dir = os.path.dirname(link_dest)\n\t\tif not os.path.isdir(link_dest_dir):\n\t\t\tos.makedirs(link_dest_dir)\n\t\tos.link(src_file, link_dest)\n\treturn total_size\n\ndef sync(src, dest, additional_opts=[], checksum=True):\n\tcmd = [\n\t\t'rsync',\n\t\t#'-n',\n\t\t'--progress',\n\t\t'--modify-window=5',\n\t\t'-r',\n\t\t#'-v',\n\t\t'--delete-before']\n\tif checksum:\n\t\tcmd.append('-c')\n\tcmd = cmd + additional_opts + [src + os.path.sep, dest]\n\tprint \"running: %r\" % (cmd,)\n\tsubprocess.check_call(cmd, stdin=subprocess.PIPE)\n\ndef write_m3u(dest, name, files):\n\tglobal STATUS\n\tencoding = sys.getfilesystemencoding()\n\twith open(os.path.join(dest, name + '.m3u'), 'w') as output:\n\t\tfor name in files:\n\t\t\ttry:\n\t\t\t\tprint >> output, name.encode(encoding)\n\t\t\texcept (UnicodeEncodeError, UnicodeDecodeError) as err:\n\t\t\t\tprint \"FAILED to write song: %r\" % (name,)\n\t\t\t\tSTATUS = 1\n\nif __name__ == '__main__':\n\tmain()\n\tsys.exit(STATUS)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
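The export script in the row above is Python 2 (print statements, raw_input) and drives rsync through subprocess.check_call, retrying interactively on failure. A minimal Python 3 sketch of the sync step alone, keeping the same rsync flags; the paths in the commented call are placeholders.

import os
import subprocess


def sync(src, dest, extra_opts=(), checksum=True):
    # Mirror src into dest: delete extraneous files before copying,
    # recurse, and optionally compare checksums instead of mtime/size.
    cmd = ['rsync', '--progress', '--modify-window=5', '-r', '--delete-before']
    if checksum:
        cmd.append('-c')
    cmd += list(extra_opts) + [src + os.path.sep, dest]
    print('running:', cmd)
    subprocess.check_call(cmd)


# sync('/tmp/irank-export', '/media/player/music', checksum=False)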
<|reserved_special_token_0|>
class Map:
<|reserved_special_token_0|>
def __init__(self, size, num_feeds):
self.size = size
self.map_cells = np.zeros((self.size, self.size))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def createCell(self, pos):
if self.map_cells[pos[0]][pos[1]] == 1:
return False
else:
self.map_cells[pos[0]][pos[1]] = 1
return True
def ploting(self):
plt.ion()
plt.figure()
while True:
f1 = plt.subplot2grid((2, 2), (0, 0))
f2 = plt.subplot2grid((2, 2), (0, 1))
f3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)
f1.matshow(nature_i.map_feeds[0], cmap=plt.cm.gray, vmin=0,
vmax=nature_i.abundance)
f2.matshow(nature_i.map_feeds[1], cmap=plt.cm.gray, vmin=0,
vmax=nature_i.abundance)
f3.matshow(self.map_cells, cmap=plt.get_cmap('gray'), vmin=0,
vmax=1)
plt.draw()
plt.pause(0.05)
<|reserved_special_token_0|>
class Nature:
"""manage feed seeds, delete feeds (eat by cells)"""
def __init__(self, abundance, num_feeds, size):
self.abundance = abundance
self.num_feeds = num_feeds
self.feeds = 0
self.size = size
map_feed_size = np.zeros((self.num_feeds, self.size, self.size))
self.map_size = map_feed_size.shape
self.map_feeds = np.random.randint(0, self.abundance, size=self.
map_size)
def deleteFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = map_i.map_feeds[feed
][position[0]][position[1]] - 1
def createFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = map_i.map_feeds[feed
][position[0]][position[1]] + 1
<|reserved_special_token_0|>
class MotherCell:
"""
Steps in a cell:
1/ update skills:
- hungry(feeds)
- mutability(feeds)
- reproductibility(feeds, time)
- mortality (feeds, time)
2/ check reproduction:
True: create cell with actual mutability skill, use feeds
False: pass
3/ check food:
check hungry:
True: calculate distance with smell:
distance = 0: eat(feeds)
distance > 0: move (x, y time) use feeds
4/ check dead(feeds, time):
True: dead
False: pass
"""
def __init__(self, ID, time, position, agility, smellInstinct,
reproduction, mutability, feeds, mortality):
self.ID = ID
self.localTime = goverment_i.globalTime - time
self.position = position
self.agility = agilityMin * agility
self.smellInstinct = smellInstinct
self.mutability = mutability
self.mortality = mortality
self.reproduction = reproduction
self.feeds = feeds
self.sweep = self.sweep()
self.moving = False
self.virtualPos = self.position
"""------------------------"""
def updateStates(self):
self.liveBar = sum(self.feeds) / len(self.feeds)
self.hungry = self.liveBar - self.mortality
self.burnFood()
self.food(self.feeds, self.instinct, self.hungry)
self.reproduction(self.mutability, self.feeds)
self.dead(self.liveBar, self.mortality, self.ID)
def reproduction(self):
pass
def food(self):
if self.hungry >= 4:
self.smell()
else:
pass
def burnFood(self):
if self.localTime % 1 == 0:
for i, x in enumerate(self.feeds):
self.feeds[i] = x - 1
def dead(self):
if self.liveBar - self.mortality == 0:
goverment_i.retirePopulation(self.ID)
"""------------------------"""
def smell(self):
for smellingPos in self.sweep:
pos = self.position[0] + smellingPos[0], self.position[1
] + smellingPos[1]
if not (pos[0] < 0 or pos[1] < 0 or pos[0] >= map_i.size or pos
[1] >= map_i.size):
for i in range(len(self.feeds)):
feeds = nature_i.map_feeds[i][int(pos[0])][int(pos[1])]
if feeds != 0:
self.move(pos)
if map_i.moveInMap(self.position, self.virtualPos
) is not True:
return
else:
self.eat((i, pos[0], pos[1]), nature_i)
self.position = self.virtualPos
time.sleep(0.0005)
return
def move(self, position_smelled):
direct = position_smelled[0] - self.position[0], position_smelled[1
] - self.position[1]
self.virtualPos = self.position[0] + T * self.agility * direct[0
], self.position[1] + T * self.agility * direct[1]
self.virtualPos = int(round(self.virtualPos[0], 0)), int(round(self
.virtualPos[1], 0))
def eat(self, food, nature_i):
self.feeds[food[0]] += 1
nature_i.map_feeds[food[0]][food[1]][food[2]] -= 1
def sweep(self):
sweep = []
signo = 1
SW = 0, 1
j = 1
sweep = [(0, 0), (0, 1)]
iterations = self.smellInstinct * 2 + 1
iterations = iterations * 2 + (iterations - 2) * 2
for i in range(1, iterations):
if i % 2 != 0:
signo = signo * -1
row = 1
col = 0
row = row * signo
col = col * signo
for x in range(j):
SW = SW[0] + row, SW[1] + col
sweep.append(SW)
if i % 2 == 0:
j = j + 1
row = 0
col = 1
row = row * signo
col = col * signo
for x in range(j):
SW = SW[0] + row, SW[1] + col
sweep.append(SW)
shuff = sweep[1:8]
shuffle(shuff)
sweep = [sweep[0]] + shuff + sweep[8:]
return sweep
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Goverment:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def createPopulation(self, position, map, agility, smellInstinct):
if map.createCell(position) == False:
return False
else:
IDx = len(self.listID)
self.listID.append(IDx)
self.listCells.append(MotherCell(IDx, goverment_i.globalTime,
position, agility, smellInstinct, 5, 5, [10, 10], 5))
return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Map:
"""manage map(x,y); collision, edges, plot...
map as 3dim matrix, (row, col, feeds (numfeeds + 1/0 if cell in position)
cell in position: [N][[N][pos, feed1, feed2, feed3, feed4, feed5]
"""
def __init__(self, size, num_feeds):
self.size = size
self.map_cells = np.zeros((self.size, self.size))
def available(self, position):
row = position[0]
col = position[1]
if row < 0 or row > self.size - 1 or col < 0 or col > self.size - 1:
return False
elif self.map_cells[row, col] == 1:
return False
else:
return True
def moveInMap(self, actual_position, position):
if actual_position == position:
return True
if self.available(position):
self.map_cells[position[0]][position[1]] = 1
self.map_cells[actual_position[0]][actual_position[1]] = 0
return True
else:
return False
def createCell(self, pos):
if self.map_cells[pos[0]][pos[1]] == 1:
return False
else:
self.map_cells[pos[0]][pos[1]] = 1
return True
def ploting(self):
plt.ion()
plt.figure()
while True:
f1 = plt.subplot2grid((2, 2), (0, 0))
f2 = plt.subplot2grid((2, 2), (0, 1))
f3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)
f1.matshow(nature_i.map_feeds[0], cmap=plt.cm.gray, vmin=0,
vmax=nature_i.abundance)
f2.matshow(nature_i.map_feeds[1], cmap=plt.cm.gray, vmin=0,
vmax=nature_i.abundance)
f3.matshow(self.map_cells, cmap=plt.get_cmap('gray'), vmin=0,
vmax=1)
plt.draw()
plt.pause(0.05)
<|reserved_special_token_0|>
class Nature:
"""manage feed seeds, delete feeds (eat by cells)"""
def __init__(self, abundance, num_feeds, size):
self.abundance = abundance
self.num_feeds = num_feeds
self.feeds = 0
self.size = size
map_feed_size = np.zeros((self.num_feeds, self.size, self.size))
self.map_size = map_feed_size.shape
self.map_feeds = np.random.randint(0, self.abundance, size=self.
map_size)
def deleteFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = map_i.map_feeds[feed
][position[0]][position[1]] - 1
def createFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = map_i.map_feeds[feed
][position[0]][position[1]] + 1
<|reserved_special_token_0|>
class MotherCell:
"""
Steps in a cell:
1/ update skills:
- hungry(feeds)
- mutability(feeds)
- reproductibility(feeds, time)
- mortality (feeds, time)
2/ check reproduction:
True: create cell with actual mutability skill, use feeds
False: pass
3/ check food:
check hungry:
True: calculate distance with smell:
distance = 0: eat(feeds)
distance > 0: move (x, y time) use feeds
4/ check dead(feeds, time):
True: dead
False: pass
"""
def __init__(self, ID, time, position, agility, smellInstinct,
reproduction, mutability, feeds, mortality):
self.ID = ID
self.localTime = goverment_i.globalTime - time
self.position = position
self.agility = agilityMin * agility
self.smellInstinct = smellInstinct
self.mutability = mutability
self.mortality = mortality
self.reproduction = reproduction
self.feeds = feeds
self.sweep = self.sweep()
self.moving = False
self.virtualPos = self.position
"""------------------------"""
def updateStates(self):
self.liveBar = sum(self.feeds) / len(self.feeds)
self.hungry = self.liveBar - self.mortality
self.burnFood()
self.food(self.feeds, self.instinct, self.hungry)
self.reproduction(self.mutability, self.feeds)
self.dead(self.liveBar, self.mortality, self.ID)
def reproduction(self):
pass
def food(self):
if self.hungry >= 4:
self.smell()
else:
pass
def burnFood(self):
if self.localTime % 1 == 0:
for i, x in enumerate(self.feeds):
self.feeds[i] = x - 1
def dead(self):
if self.liveBar - self.mortality == 0:
goverment_i.retirePopulation(self.ID)
"""------------------------"""
def smell(self):
for smellingPos in self.sweep:
pos = self.position[0] + smellingPos[0], self.position[1
] + smellingPos[1]
if not (pos[0] < 0 or pos[1] < 0 or pos[0] >= map_i.size or pos
[1] >= map_i.size):
for i in range(len(self.feeds)):
feeds = nature_i.map_feeds[i][int(pos[0])][int(pos[1])]
if feeds != 0:
self.move(pos)
if map_i.moveInMap(self.position, self.virtualPos
) is not True:
return
else:
self.eat((i, pos[0], pos[1]), nature_i)
self.position = self.virtualPos
time.sleep(0.0005)
return
def move(self, position_smelled):
direct = position_smelled[0] - self.position[0], position_smelled[1
] - self.position[1]
self.virtualPos = self.position[0] + T * self.agility * direct[0
], self.position[1] + T * self.agility * direct[1]
self.virtualPos = int(round(self.virtualPos[0], 0)), int(round(self
.virtualPos[1], 0))
def eat(self, food, nature_i):
self.feeds[food[0]] += 1
nature_i.map_feeds[food[0]][food[1]][food[2]] -= 1
def sweep(self):
sweep = []
signo = 1
SW = 0, 1
j = 1
sweep = [(0, 0), (0, 1)]
iterations = self.smellInstinct * 2 + 1
iterations = iterations * 2 + (iterations - 2) * 2
for i in range(1, iterations):
if i % 2 != 0:
signo = signo * -1
row = 1
col = 0
row = row * signo
col = col * signo
for x in range(j):
SW = SW[0] + row, SW[1] + col
sweep.append(SW)
if i % 2 == 0:
j = j + 1
row = 0
col = 1
row = row * signo
col = col * signo
for x in range(j):
SW = SW[0] + row, SW[1] + col
sweep.append(SW)
shuff = sweep[1:8]
shuffle(shuff)
sweep = [sweep[0]] + shuff + sweep[8:]
return sweep
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Goverment:
<|reserved_special_token_0|>
def __init__(self):
self.listID = []
self.listCells = []
self.globalTime = 0
def createPopulation(self, position, map, agility, smellInstinct):
if map.createCell(position) == False:
return False
else:
IDx = len(self.listID)
self.listID.append(IDx)
self.listCells.append(MotherCell(IDx, goverment_i.globalTime,
position, agility, smellInstinct, 5, 5, [10, 10], 5))
return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Map:
"""manage map(x,y); collision, edges, plot...
map as 3dim matrix, (row, col, feeds (numfeeds + 1/0 if cell in position)
cell in position: [N][[N][pos, feed1, feed2, feed3, feed4, feed5]
"""
def __init__(self, size, num_feeds):
self.size = size
self.map_cells = np.zeros((self.size, self.size))
def available(self, position):
row = position[0]
col = position[1]
if row < 0 or row > self.size - 1 or col < 0 or col > self.size - 1:
return False
elif self.map_cells[row, col] == 1:
return False
else:
return True
def moveInMap(self, actual_position, position):
if actual_position == position:
return True
if self.available(position):
self.map_cells[position[0]][position[1]] = 1
self.map_cells[actual_position[0]][actual_position[1]] = 0
return True
else:
return False
def createCell(self, pos):
if self.map_cells[pos[0]][pos[1]] == 1:
return False
else:
self.map_cells[pos[0]][pos[1]] = 1
return True
def ploting(self):
plt.ion()
plt.figure()
while True:
f1 = plt.subplot2grid((2, 2), (0, 0))
f2 = plt.subplot2grid((2, 2), (0, 1))
f3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)
f1.matshow(nature_i.map_feeds[0], cmap=plt.cm.gray, vmin=0,
vmax=nature_i.abundance)
f2.matshow(nature_i.map_feeds[1], cmap=plt.cm.gray, vmin=0,
vmax=nature_i.abundance)
f3.matshow(self.map_cells, cmap=plt.get_cmap('gray'), vmin=0,
vmax=1)
plt.draw()
plt.pause(0.05)
<|reserved_special_token_0|>
class Nature:
"""manage feed seeds, delete feeds (eat by cells)"""
def __init__(self, abundance, num_feeds, size):
self.abundance = abundance
self.num_feeds = num_feeds
self.feeds = 0
self.size = size
map_feed_size = np.zeros((self.num_feeds, self.size, self.size))
self.map_size = map_feed_size.shape
self.map_feeds = np.random.randint(0, self.abundance, size=self.
map_size)
def deleteFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = map_i.map_feeds[feed
][position[0]][position[1]] - 1
def createFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = map_i.map_feeds[feed
][position[0]][position[1]] + 1
<|reserved_special_token_0|>
class MotherCell:
"""
Steps in a cell:
1/ update skills:
- hungry(feeds)
- mutability(feeds)
- reproductibility(feeds, time)
- mortality (feeds, time)
2/ check reproduction:
True: create cell with actual mutability skill, use feeds
False: pass
3/ check food:
check hungry:
True: calculate distance with smell:
distance = 0: eat(feeds)
distance > 0: move (x, y time) use feeds
4/ check dead(feeds, time):
True: dead
False: pass
"""
def __init__(self, ID, time, position, agility, smellInstinct,
reproduction, mutability, feeds, mortality):
self.ID = ID
self.localTime = goverment_i.globalTime - time
self.position = position
self.agility = agilityMin * agility
self.smellInstinct = smellInstinct
self.mutability = mutability
self.mortality = mortality
self.reproduction = reproduction
self.feeds = feeds
self.sweep = self.sweep()
self.moving = False
self.virtualPos = self.position
"""------------------------"""
def updateStates(self):
self.liveBar = sum(self.feeds) / len(self.feeds)
self.hungry = self.liveBar - self.mortality
self.burnFood()
self.food(self.feeds, self.instinct, self.hungry)
self.reproduction(self.mutability, self.feeds)
self.dead(self.liveBar, self.mortality, self.ID)
def reproduction(self):
pass
def food(self):
if self.hungry >= 4:
self.smell()
else:
pass
def burnFood(self):
if self.localTime % 1 == 0:
for i, x in enumerate(self.feeds):
self.feeds[i] = x - 1
def dead(self):
if self.liveBar - self.mortality == 0:
goverment_i.retirePopulation(self.ID)
"""------------------------"""
def smell(self):
for smellingPos in self.sweep:
pos = self.position[0] + smellingPos[0], self.position[1
] + smellingPos[1]
if not (pos[0] < 0 or pos[1] < 0 or pos[0] >= map_i.size or pos
[1] >= map_i.size):
for i in range(len(self.feeds)):
feeds = nature_i.map_feeds[i][int(pos[0])][int(pos[1])]
if feeds != 0:
self.move(pos)
if map_i.moveInMap(self.position, self.virtualPos
) is not True:
return
else:
self.eat((i, pos[0], pos[1]), nature_i)
self.position = self.virtualPos
time.sleep(0.0005)
return
def move(self, position_smelled):
direct = position_smelled[0] - self.position[0], position_smelled[1
] - self.position[1]
self.virtualPos = self.position[0] + T * self.agility * direct[0
], self.position[1] + T * self.agility * direct[1]
self.virtualPos = int(round(self.virtualPos[0], 0)), int(round(self
.virtualPos[1], 0))
def eat(self, food, nature_i):
self.feeds[food[0]] += 1
nature_i.map_feeds[food[0]][food[1]][food[2]] -= 1
def sweep(self):
sweep = []
signo = 1
SW = 0, 1
j = 1
sweep = [(0, 0), (0, 1)]
iterations = self.smellInstinct * 2 + 1
iterations = iterations * 2 + (iterations - 2) * 2
for i in range(1, iterations):
if i % 2 != 0:
signo = signo * -1
row = 1
col = 0
row = row * signo
col = col * signo
for x in range(j):
SW = SW[0] + row, SW[1] + col
sweep.append(SW)
if i % 2 == 0:
j = j + 1
row = 0
col = 1
row = row * signo
col = col * signo
for x in range(j):
SW = SW[0] + row, SW[1] + col
sweep.append(SW)
shuff = sweep[1:8]
shuffle(shuff)
sweep = [sweep[0]] + shuff + sweep[8:]
return sweep
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
T = 1
eps = 1e-09
agilityMin = 1 / T
<|reserved_special_token_0|>
class Goverment:
""" manage population """
def __init__(self):
self.listID = []
self.listCells = []
self.globalTime = 0
def createPopulation(self, position, map, agility, smellInstinct):
if map.createCell(position) == False:
return False
else:
IDx = len(self.listID)
self.listID.append(IDx)
self.listCells.append(MotherCell(IDx, goverment_i.globalTime,
position, agility, smellInstinct, 5, 5, [10, 10], 5))
return True
def retirePopulation(self, IDx):
self.listID[IDx] = 0
def clock(self):
self.globalTime += T
<|reserved_special_token_0|>
class Map:
"""manage map(x,y); collision, edges, plot...
map as 3dim matrix, (row, col, feeds (numfeeds + 1/0 if cell in position)
cell in position: [N][[N][pos, feed1, feed2, feed3, feed4, feed5]
"""
def __init__(self, size, num_feeds):
self.size = size
self.map_cells = np.zeros((self.size, self.size))
def available(self, position):
row = position[0]
col = position[1]
if row < 0 or row > self.size - 1 or col < 0 or col > self.size - 1:
return False
elif self.map_cells[row, col] == 1:
return False
else:
return True
def moveInMap(self, actual_position, position):
if actual_position == position:
return True
if self.available(position):
self.map_cells[position[0]][position[1]] = 1
self.map_cells[actual_position[0]][actual_position[1]] = 0
return True
else:
return False
def createCell(self, pos):
if self.map_cells[pos[0]][pos[1]] == 1:
return False
else:
self.map_cells[pos[0]][pos[1]] = 1
return True
def ploting(self):
plt.ion()
plt.figure()
while True:
f1 = plt.subplot2grid((2, 2), (0, 0))
f2 = plt.subplot2grid((2, 2), (0, 1))
f3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)
f1.matshow(nature_i.map_feeds[0], cmap=plt.cm.gray, vmin=0,
vmax=nature_i.abundance)
f2.matshow(nature_i.map_feeds[1], cmap=plt.cm.gray, vmin=0,
vmax=nature_i.abundance)
f3.matshow(self.map_cells, cmap=plt.get_cmap('gray'), vmin=0,
vmax=1)
plt.draw()
plt.pause(0.05)
<|reserved_special_token_0|>
class Nature:
"""manage feed seeds, delete feeds (eat by cells)"""
def __init__(self, abundance, num_feeds, size):
self.abundance = abundance
self.num_feeds = num_feeds
self.feeds = 0
self.size = size
map_feed_size = np.zeros((self.num_feeds, self.size, self.size))
self.map_size = map_feed_size.shape
self.map_feeds = np.random.randint(0, self.abundance, size=self.
map_size)
def deleteFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = map_i.map_feeds[feed
][position[0]][position[1]] - 1
def createFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = map_i.map_feeds[feed
][position[0]][position[1]] + 1
<|reserved_special_token_0|>
class MotherCell:
"""
Steps in a cell:
1/ update skills:
- hungry(feeds)
- mutability(feeds)
- reproductibility(feeds, time)
- mortality (feeds, time)
2/ check reproduction:
True: create cell with actual mutability skill, use feeds
False: pass
3/ check food:
check hungry:
True: calculate distance with smell:
distance = 0: eat(feeds)
distance > 0: move (x, y time) use feeds
4/ check dead(feeds, time):
True: dead
False: pass
"""
def __init__(self, ID, time, position, agility, smellInstinct,
reproduction, mutability, feeds, mortality):
self.ID = ID
self.localTime = goverment_i.globalTime - time
self.position = position
self.agility = agilityMin * agility
self.smellInstinct = smellInstinct
self.mutability = mutability
self.mortality = mortality
self.reproduction = reproduction
self.feeds = feeds
self.sweep = self.sweep()
self.moving = False
self.virtualPos = self.position
"""------------------------"""
def updateStates(self):
self.liveBar = sum(self.feeds) / len(self.feeds)
self.hungry = self.liveBar - self.mortality
self.burnFood()
self.food(self.feeds, self.instinct, self.hungry)
self.reproduction(self.mutability, self.feeds)
self.dead(self.liveBar, self.mortality, self.ID)
def reproduction(self):
pass
def food(self):
if self.hungry >= 4:
self.smell()
else:
pass
def burnFood(self):
if self.localTime % 1 == 0:
for i, x in enumerate(self.feeds):
self.feeds[i] = x - 1
def dead(self):
if self.liveBar - self.mortality == 0:
goverment_i.retirePopulation(self.ID)
"""------------------------"""
def smell(self):
for smellingPos in self.sweep:
pos = self.position[0] + smellingPos[0], self.position[1
] + smellingPos[1]
if not (pos[0] < 0 or pos[1] < 0 or pos[0] >= map_i.size or pos
[1] >= map_i.size):
for i in range(len(self.feeds)):
feeds = nature_i.map_feeds[i][int(pos[0])][int(pos[1])]
if feeds != 0:
self.move(pos)
if map_i.moveInMap(self.position, self.virtualPos
) is not True:
return
else:
self.eat((i, pos[0], pos[1]), nature_i)
self.position = self.virtualPos
time.sleep(0.0005)
return
def move(self, position_smelled):
direct = position_smelled[0] - self.position[0], position_smelled[1
] - self.position[1]
self.virtualPos = self.position[0] + T * self.agility * direct[0
], self.position[1] + T * self.agility * direct[1]
self.virtualPos = int(round(self.virtualPos[0], 0)), int(round(self
.virtualPos[1], 0))
def eat(self, food, nature_i):
self.feeds[food[0]] += 1
nature_i.map_feeds[food[0]][food[1]][food[2]] -= 1
def sweep(self):
sweep = []
signo = 1
SW = 0, 1
j = 1
sweep = [(0, 0), (0, 1)]
iterations = self.smellInstinct * 2 + 1
iterations = iterations * 2 + (iterations - 2) * 2
for i in range(1, iterations):
if i % 2 != 0:
signo = signo * -1
row = 1
col = 0
row = row * signo
col = col * signo
for x in range(j):
SW = SW[0] + row, SW[1] + col
sweep.append(SW)
if i % 2 == 0:
j = j + 1
row = 0
col = 1
row = row * signo
col = col * signo
for x in range(j):
SW = SW[0] + row, SW[1] + col
sweep.append(SW)
shuff = sweep[1:8]
shuffle(shuff)
sweep = [sweep[0]] + shuff + sweep[8:]
return sweep
<|reserved_special_token_0|>
if __name__ == '__main__':
goverment_i = Goverment()
num_feeds = 2
size = 70
abundance = 3
nature_i = Nature(3, num_feeds, size)
map_i = Map(size, num_feeds)
goverment_i.clock()
goverment_i.createPopulation((5, 5), map_i, 1, 5)
goverment_i.createPopulation((20, 20), map_i, 2, 2)
goverment_i.createPopulation((40, 40), map_i, 3, 4)
t_map_feeds = threading.Thread(target=map_i.ploting)
print('Iniciada la vida')
print('Cell position: ', goverment_i.listCells[0].position)
print('Cell position: ', goverment_i.listCells[1].position)
print('Cell position: ', goverment_i.listCells[2].position)
t_map_feeds.start()
time.sleep(1)
for x in range(30000):
goverment_i.listCells[0].smell()
goverment_i.listCells[1].smell()
goverment_i.listCells[2].smell()
time.sleep(0.005)
<|reserved_special_token_1|>
import time
import numpy as np
import matplotlib.pyplot as plt #tutorial: http://pybonacci.org/2012/05/19/manual-de-introduccion-a-matplotlib-pyplot-ii-creando-y-manejando-ventanas-y-configurando-la-sesion/
import threading
from random import shuffle
T = 1
eps = 0.000000001
agilityMin = 1/T
'''------------GOVERMENT'''
class Goverment:
''' manage population '''
def __init__(self ):
self.listID = []
self.listCells = []
self.globalTime = 0
def createPopulation(self, position, map, agility, smellInstinct):
if map.createCell(position) == False:
return False
else:
IDx = len(self.listID)
self.listID.append(IDx)
self.listCells.append(MotherCell(IDx, goverment_i.globalTime, position, agility, smellInstinct, 5, 5, [10, 10], 5))
#(ID, time, positio n, agility, smellInstinct, reproduction, mutability, feeds, mortality)
return True
def retirePopulation (self, IDx):
self.listID[IDx] = 0 #instancia cell no esta borrada creo
def clock(self):
self.globalTime += T
'''------------MAP'''
class Map:
'''manage map(x,y); collision, edges, plot...
map as 3dim matrix, (row, col, feeds (numfeeds + 1/0 if cell in position)
cell in position: [N][[N][pos, feed1, feed2, feed3, feed4, feed5]
'''
def __init__(self, size, num_feeds):
self.size = size
self.map_cells = np.zeros((self.size, self.size)) #ncluye posicion celula
def available(self, position):
#position as row/col
#return True if occupy
row = position[0]
col = position[1]
if row < 0 or row > (self.size - 1) or col < 0 or col > (self.size - 1):
return False
elif self.map_cells[row, col] == 1:
return False
else:
return True
def moveInMap(self, actual_position, position):
if actual_position == position:
return True
if self.available(position):
self.map_cells[position[0]][position[1]] = 1
self.map_cells[actual_position[0]][actual_position[1]] = 0
return True
else:
return False
def createCell(self, pos):
if self.map_cells[pos[0]][pos[1]] == 1:
return False
else:
self.map_cells[pos[0]][pos[1]] = 1
return True
def ploting(self):
plt.ion()
plt.figure()
#plt.matshow(nature_i.map_feeds[0], fignum=1, cmap=plt.cm.gray)
while True:
f1 = plt.subplot2grid((2, 2), (0, 0))
f2 = plt.subplot2grid((2, 2), (0, 1))
f3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)
f1.matshow(nature_i.map_feeds[0], cmap=plt.cm.gray, vmin=0, vmax=nature_i.abundance)
f2.matshow(nature_i.map_feeds[1], cmap=plt.cm.gray, vmin=0, vmax=nature_i.abundance)
f3.matshow(self.map_cells, cmap=plt.get_cmap('gray'), vmin=0, vmax=1)
plt.draw()
plt.pause(0.05)
'''------------NATURE'''
class Nature:
'''manage feed seeds, delete feeds (eat by cells)'''
def __init__(self, abundance, num_feeds, size):
self.abundance = abundance
self.num_feeds = num_feeds
self.feeds = 0
self.size = size
map_feed_size = np.zeros((self.num_feeds, self.size, self.size))#incluye recusros de celda
self.map_size = map_feed_size.shape
self.map_feeds = np.random.randint(0, self.abundance, size = self.map_size)
def deleteFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] =\
map_i.map_feeds[feed][position[0]][position[1]] - 1#lo ultimo la columna siempre
def createFeed(self, position, feed):
map_i.map_feeds[feed][position[0]][position[1]] = \
map_i.map_feeds[feed][position[0]][position[1]] + 1
'''------------CELLS'''
class MotherCell:
'''
Steps in a cell:
1/ update skills:
- hungry(feeds)
- mutability(feeds)
- reproductibility(feeds, time)
- mortality (feeds, time)
2/ check reproduction:
True: create cell with actual mutability skill, use feeds
False: pass
3/ check food:
check hungry:
True: calculate distance with smell:
distance = 0: eat(feeds)
distance > 0: move (x, y time) use feeds
4/ check dead(feeds, time):
True: dead
False: pass
'''
def __init__(self,ID, time, position, agility, smellInstinct, reproduction, mutability, feeds, mortality):
self.ID = ID
self.localTime = goverment_i.globalTime - time
self.position = position
#Skills
self.agility = agilityMin * agility# agility 0--5
self.smellInstinct = smellInstinct # from 0 to 10, radious of smeelled cels
self.mutability = mutability # from 0 to 10
self.mortality = mortality # from 0 to 10
self.reproduction = reproduction
self.feeds = feeds #[0, 0] from 0 to 10
self.sweep = self.sweep()# created the sweep list with smellInstinct radious
self.moving = False
self.virtualPos = self.position
'''------------------------'''
def updateStates(self):
#states
self.liveBar = sum(self.feeds) / len(self.feeds)#if liveBar - mortality == 0 => dead
self.hungry = self.liveBar - self.mortality
self.burnFood()
self.food(self.feeds, self.instinct, self.hungry)
self.reproduction(self.mutability, self.feeds)
self.dead(self.liveBar, self.mortality, self.ID)
def reproduction(self):
#mutability, feeds, time?
pass
def food(self):
#feeds, instinct
if self.hungry >= 4:
self.smell()
else:
pass
def burnFood(self):
if self.localTime % 1 == 0:
for i, x in enumerate(self.feeds):
self.feeds[i] = x - 1
def dead(self):
#mortality
if self.liveBar - self.mortality == 0:
goverment_i.retirePopulation(self.ID)
'''------------------------'''
def smell(self):
for smellingPos in self.sweep:
pos = (self.position[0] + smellingPos[0], self.position[1] + smellingPos[1])
if not (pos[0] < 0 or pos[1] < 0 or pos[0] >= map_i.size or pos[1] >= map_i.size):
for i in range(len(self.feeds)):
feeds = nature_i.map_feeds[i][int(pos[0])][int(pos[1])]
if feeds != 0:
self.move(pos)
if map_i.moveInMap(self.position, self.virtualPos) is not True:
return
else:
self.eat((i, pos[0], pos[1]), nature_i)
self.position = self.virtualPos
# print('position: {}, virtualPos: {}feed({}) remain: {}. sweep: {}'.format(
# self.position,self.virtualPos, i, feeds, smellingPos))
time.sleep(0.0005)
return
def move(self, position_smelled):
#manage agility
direct = (position_smelled[0] - self.position[0], position_smelled[1] - self.position[1])
self.virtualPos = (self.position[0] + (T * self.agility)* direct[0],
self.position[1] + (T * self.agility)* direct[1])
self.virtualPos = int(round(self.virtualPos[0],0)), int(round(self.virtualPos[1],0))
def eat(self, food, nature_i):#food = (feed, pos, pos)
self.feeds[food[0]] += 1
nature_i.map_feeds[food[0]][food[1]][food[2]] -= 1
def sweep(self):
sweep = []
signo = 1;
SW = (0, 1);
j = 1;
sweep = [(0, 0), (0, 1)]
iterations = (self.smellInstinct*2) + 1
iterations = (iterations * 2) + ((iterations - 2) * 2)
for i in range(1, iterations):
if i % 2 != 0:
signo = signo * (-1)
row = 1;
col = 0
row = row * signo;
col = col * signo
for x in range(j):
SW = (SW[0] + row, SW[1] + col)
sweep.append(SW)
if i % 2 == 0:
j = j + 1
row = 0;
col = 1;
row = row * signo;
col = col * signo
for x in range(j):
SW = (SW[0] + row, SW[1] + col)
sweep.append((SW))
shuff = sweep[1:8]
shuffle(shuff)
sweep = [sweep[0]] + shuff + sweep[8:]
return sweep
'''-----------MAIN'''
if __name__ == '__main__':
goverment_i = Goverment()
num_feeds = 2
size = 70
abundance = 3
nature_i = Nature(3, num_feeds, size)#abundance and number of feeds
map_i = Map(size, num_feeds)#size, num of feeds
goverment_i.clock()
goverment_i.createPopulation((5, 5), map_i, 1, 5)#position, map, agility, smellInstict
goverment_i.createPopulation((20, 20), map_i, 2, 2)
goverment_i.createPopulation((40, 40), map_i, 3, 4)
t_map_feeds = threading.Thread(target=map_i.ploting)
print ("Iniciada la vida")
print ("Cell position: ", goverment_i.listCells[0].position)
print ("Cell position: ", goverment_i.listCells[1].position)
print ("Cell position: ", goverment_i.listCells[2].position)
t_map_feeds.start()
time.sleep(1)
for x in range(30000):
goverment_i.listCells[0].smell()
goverment_i.listCells[1].smell()
goverment_i.listCells[2].smell()
time.sleep(0.005)
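# Note (added): the plotting thread is created as a non-daemon thread and
# ploting() loops forever, so the process keeps running after the smell loop
# ends; pass daemon=True to threading.Thread if the script should exit.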
import sqlite3
import argparse
import json
import index_db
def query_doc(cursor, lang, title):
cursor.execute(index_db.select_lang_title, (lang, title))
result = cursor.fetchone()
if not result:
return None
return {
'lang': result[0],
'doc_id': result[1],
'doc_path': result[2],
# 'url': result[3], # I don't think url is needed here...
'title': result[4],
'begin': result[5],
'end': result[6]
}
def locate_single_topic_texts(lang_title_dict, cursor):
same_topic = (query_doc(cursor, l, t) for l, t in lang_title_dict.items())
return sorted(
(i for i in same_topic if i),
key=lambda x: x['lang']
)
def locate_interlanguage_texts(file_path, db_path):
with open(file_path, 'rt') as f:
        interlanguage = json.load(f)
with sqlite3.connect(db_path) as conn:
c = conn.cursor()
        return [locate_single_topic_texts(pairs, c) for pairs in interlanguage]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Locate same topic texts over multiple languages.')
parser.add_argument('--db', dest='db_path', default=index_db.default_path,
help='a sqlite database file generated by index.py')
parser.add_argument('--input', dest='input_path',
default='interlanguage_topics.json',
help='a json file containing sets of topics over '
'multiple languages')
parser.add_argument('--output', dest='output_path',
default='interlanguage_location.json',
help='a json file locating same topic texts over '
'multiple languages')
args = parser.parse_args()
location_infos = locate_interlanguage_texts(args.input_path, args.db_path)
with open(args.output_path, 'wt') as f:
json.dump(location_infos, f)
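# Example shapes (added illustration, inferred from the code above rather
# than from project documentation): the input JSON is a list of
# {language_code: title} mappings, e.g.
#
#   [{"en": "Coffee", "de": "Kaffee"},
#    {"en": "Tea", "de": "Tee", "fr": "The"}]
#
# and each entry of the output is the list of per-language location dicts
# produced by query_doc, sorted by language code.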
import torch
import torchvision.transforms.functional as F
import numpy as np
import yaml
from pathlib import Path
IGNORE_LABEL = 255
STATS = {
"vit": {"mean": (0.5, 0.5, 0.5), "std": (0.5, 0.5, 0.5)},
"deit": {"mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225)},
}
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
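# Usage note (added): seg is expected to be an integer class map of shape
# (N, H, W); the result is an (N, H, W, 3) float tensor of RGB values built
# from the colors dict returned by dataset_cat_description below.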
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, "r"), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat["name"])
if "color" in cat:
colors[cat["id"]] = torch.tensor(cat["color"]).float() / 255
else:
colors[cat["id"]] = torch.tensor(cmap[cat["id"]]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
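# Example category file (added illustration; the field names are inferred
# from the loader above, not from project documentation):
#
#   - id: 0
#     name: road
#     color: [128, 64, 128]
#   - id: 1
#     name: sky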
def rgb_normalize(x, stats):
"""
x : C x *
    x takes values in [0, 1]
"""
return F.normalize(x, stats["mean"], stats["std"])
def rgb_denormalize(x, stats):
"""
x : N x C x *
    x takes values in [-1, 1]
"""
mean = torch.tensor(stats["mean"])
std = torch.tensor(stats["std"])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
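# Quick self-check (added illustration, not part of the original module):
# normalizing and then denormalizing with the same statistics should be a
# round trip.
if __name__ == "__main__":
    x = torch.rand(3, 8, 8)  # C x H x W image with values in [0, 1]
    x_norm = rgb_normalize(x, STATS["vit"])
    x_back = rgb_denormalize(x_norm.unsqueeze(0).clone(), STATS["vit"])
    assert torch.allclose(x_back[0], x, atol=1e-6)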
import argparse
import os
from flower_classifier import FlowerClassifier
from util import *
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", help="path to training images")
parser.add_argument("--save_dir", default=".", help="path where checkpoint is saved")
parser.add_argument("--arch", default="vgg11", help="which pre-trained model to use as a base. vgg11 or alexnet")
parser.add_argument("--learning_rate", type=float, default=0.003, help="learning rate of the model")
parser.add_argument("--hidden_units", type=int, default=1024, help="size of hidden layer")
parser.add_argument("--gpu", default=False, action="store_true", help="size of hidden layer")
parser.add_argument("--epochs", type=int, default=1, help="number of training epochs")
args = parser.parse_args()
print(args)
def main():
    f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)
    f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate=args.learning_rate)
    save_checkpoint(f_class, os.path.join(args.save_dir, 'checkpoint.pth'))
    top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg', 3, 'cat_to_name.json')
    print(top_probs, top_classes)
if __name__ == "__main__":
    main()
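# Example invocation (added; the script file name train.py is an assumption):
#
#   python train.py flowers --arch vgg11 --hidden_units 1024 \
#       --learning_rate 0.003 --epochs 3 --gpu --save_dir checkpoints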
|
normal
|
{
"blob_id": "0c3947a1699c78080661a55bbaa9215774b4a18e",
"index": 4751,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('data_dir', help='path to training images')\nparser.add_argument('--save_dir', default='.', help=\n 'path where checkpoint is saved')\nparser.add_argument('--arch', default='vgg11', help=\n 'which pre-trained model to use as a base. vgg11 or alexnet')\nparser.add_argument('--learning_rate', type=float, default=0.003, help=\n 'learning rate of the model')\nparser.add_argument('--hidden_units', type=int, default=1024, help=\n 'size of hidden layer')\nparser.add_argument('--gpu', default=False, action='store_true', help=\n 'size of hidden layer')\nparser.add_argument('--epochs', type=int, default=1, help=\n 'number of training epochs')\n<mask token>\nprint(args)\n\n\ndef main():\n f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)\n f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate\n =args.learning_rate)\n save_checkpoint(f_class, 'checkpoint.pth')\n top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',\n 3, 'cat_to_name.json')\n print(top_probs, top_classes)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser()\nparser.add_argument('data_dir', help='path to training images')\nparser.add_argument('--save_dir', default='.', help=\n 'path where checkpoint is saved')\nparser.add_argument('--arch', default='vgg11', help=\n 'which pre-trained model to use as a base. vgg11 or alexnet')\nparser.add_argument('--learning_rate', type=float, default=0.003, help=\n 'learning rate of the model')\nparser.add_argument('--hidden_units', type=int, default=1024, help=\n 'size of hidden layer')\nparser.add_argument('--gpu', default=False, action='store_true', help=\n 'size of hidden layer')\nparser.add_argument('--epochs', type=int, default=1, help=\n 'number of training epochs')\nargs = parser.parse_args()\nprint(args)\n\n\ndef main():\n f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)\n f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate\n =args.learning_rate)\n save_checkpoint(f_class, 'checkpoint.pth')\n top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',\n 3, 'cat_to_name.json')\n print(top_probs, top_classes)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nfrom flower_classifier import FlowerClassifier\nfrom util import *\nparser = argparse.ArgumentParser()\nparser.add_argument('data_dir', help='path to training images')\nparser.add_argument('--save_dir', default='.', help=\n 'path where checkpoint is saved')\nparser.add_argument('--arch', default='vgg11', help=\n 'which pre-trained model to use as a base. vgg11 or alexnet')\nparser.add_argument('--learning_rate', type=float, default=0.003, help=\n 'learning rate of the model')\nparser.add_argument('--hidden_units', type=int, default=1024, help=\n 'size of hidden layer')\nparser.add_argument('--gpu', default=False, action='store_true', help=\n 'size of hidden layer')\nparser.add_argument('--epochs', type=int, default=1, help=\n 'number of training epochs')\nargs = parser.parse_args()\nprint(args)\n\n\ndef main():\n f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)\n f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate\n =args.learning_rate)\n save_checkpoint(f_class, 'checkpoint.pth')\n top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg',\n 3, 'cat_to_name.json')\n print(top_probs, top_classes)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import argparse\nfrom flower_classifier import FlowerClassifier\nfrom util import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"data_dir\", help=\"path to training images\")\nparser.add_argument(\"--save_dir\", default=\".\", help=\"path where checkpoint is saved\")\nparser.add_argument(\"--arch\", default=\"vgg11\", help=\"which pre-trained model to use as a base. vgg11 or alexnet\")\nparser.add_argument(\"--learning_rate\", type=float, default=0.003, help=\"learning rate of the model\")\nparser.add_argument(\"--hidden_units\", type=int, default=1024, help=\"size of hidden layer\")\nparser.add_argument(\"--gpu\", default=False, action=\"store_true\", help=\"size of hidden layer\")\nparser.add_argument(\"--epochs\", type=int, default=1, help=\"number of training epochs\")\nargs = parser.parse_args()\nprint(args)\n\ndef main():\n f_class = FlowerClassifier(args.arch, args.hidden_units, args.gpu)\n f_class.train(data_dir=args.data_dir, epochs=args.epochs, learning_rate=args.learning_rate)\n save_checkpoint(f_class, 'checkpoint.pth')\n #print(model.cat_to_name)\n top_probs, top_classes = f_class.predict('flowers/valid/1/image_06765.jpg', 3, 'cat_to_name.json')\n print(top_probs, top_classes)\n\nif __name__ == \"__main__\": main()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
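Side note: the argparse interface built in the training script above can be exercised without a real shell by handing parse_args an explicit argument list. A minimal, self-contained sketch using a subset of the flags defined above (the --gpu help text is corrected wording, an assumption on my part):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='path to training images')
parser.add_argument('--epochs', type=int, default=1, help='number of training epochs')
parser.add_argument('--gpu', default=False, action='store_true', help='train on GPU if available')

# Equivalent to: python train.py flowers --epochs 3 --gpu
args = parser.parse_args(['flowers', '--epochs', '3', '--gpu'])
print(args.data_dir, args.epochs, args.gpu)  # flowers 3 True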
valor1=input("Ingrese Primera Cantidad ")
valor2=input("Ingrese Segunda Cantidad ")
Total = valor1 + valor2
print "El total es: " + str(Total)
|
normal
|
{
"blob_id": "5c179752f4c4e1d693346c6edddd79211a895735",
"index": 8685,
"step-1": "valor1=input(\"Ingrese Primera Cantidad \")\nvalor2=input(\"Ingrese Segunda Cantidad \")\nTotal = valor1 + valor2\nprint \"El total es: \" + str(Total)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Items(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, email, item, description, image, category, price):
self.email = email
self.item = item
self.description = description
self.image = image
self.category = category
self.price = price
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Items(db.Model):
<|reserved_special_token_0|>
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String)
item = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=False)
image = db.Column(db.String)
category = db.Column(db.String, nullable=False)
price = db.Column(db.String, nullable=False)
def __init__(self, email, item, description, image, category, price):
self.email = email
self.item = item
self.description = description
self.image = image
self.category = category
self.price = price
@property
def serialize(self):
""" Function to return a json object for each
instance of the class Items """
return {'id': self.id, 'item': self.item, 'description': self.
description, 'image': self.image, 'category': self.category,
'price': self.price}
def __repr__(self):
""" Functon to represent the class instance """
return '<item {}>'.format(self.item)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Items(db.Model):
""" Model to store all the information about an item """
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String)
item = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=False)
image = db.Column(db.String)
category = db.Column(db.String, nullable=False)
price = db.Column(db.String, nullable=False)
def __init__(self, email, item, description, image, category, price):
self.email = email
self.item = item
self.description = description
self.image = image
self.category = category
self.price = price
@property
def serialize(self):
""" Function to return a json object for each
instance of the class Items """
return {'id': self.id, 'item': self.item, 'description': self.
description, 'image': self.image, 'category': self.category,
'price': self.price}
def __repr__(self):
""" Functon to represent the class instance """
return '<item {}>'.format(self.item)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from catalog import db
class Items(db.Model):
""" Model to store all the information about an item """
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String)
item = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=False)
image = db.Column(db.String)
category = db.Column(db.String, nullable=False)
price = db.Column(db.String, nullable=False)
def __init__(self, email, item, description, image, category, price):
self.email = email
self.item = item
self.description = description
self.image = image
self.category = category
self.price = price
@property
def serialize(self):
""" Function to return a json object for each
instance of the class Items """
return {'id': self.id, 'item': self.item, 'description': self.
description, 'image': self.image, 'category': self.category,
'price': self.price}
def __repr__(self):
""" Functon to represent the class instance """
return '<item {}>'.format(self.item)
<|reserved_special_token_1|>
''' This module creates the models/tables in the database
catalog using sqlalchemy '''
from catalog import db
class Items(db.Model):
''' Model to store all the information about an item '''
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String)
item = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=False)
image = db.Column(db.String)
category = db.Column(db.String, nullable=False)
price = db.Column(db.String, nullable=False)
def __init__(self, email, item, description, image, category, price):
self.email = email
self.item = item
self.description = description
self.image = image
self.category = category
self.price = price
@property
def serialize(self):
''' Function to return a json object for each
instance of the class Items '''
return { 'id': self.id,
'item': self.item,
'description': self.description,
'image': self.image,
'category': self.category,
'price': self.price }
def __repr__(self):
        ''' Function to represent the class instance '''
return '<item {}>'.format(self.item)
|
flexible
|
{
"blob_id": "ad622ff2e1d9286246b2175694a9ae796f8d2557",
"index": 7535,
"step-1": "<mask token>\n\n\nclass Items(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, email, item, description, image, category, price):\n self.email = email\n self.item = item\n self.description = description\n self.image = image\n self.category = category\n self.price = price\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Items(db.Model):\n <mask token>\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String)\n item = db.Column(db.String, nullable=False)\n description = db.Column(db.String, nullable=False)\n image = db.Column(db.String)\n category = db.Column(db.String, nullable=False)\n price = db.Column(db.String, nullable=False)\n\n def __init__(self, email, item, description, image, category, price):\n self.email = email\n self.item = item\n self.description = description\n self.image = image\n self.category = category\n self.price = price\n\n @property\n def serialize(self):\n \"\"\" Function to return a json object for each \n instance of the class Items \"\"\"\n return {'id': self.id, 'item': self.item, 'description': self.\n description, 'image': self.image, 'category': self.category,\n 'price': self.price}\n\n def __repr__(self):\n \"\"\" Functon to represent the class instance \"\"\"\n return '<item {}>'.format(self.item)\n",
"step-3": "<mask token>\n\n\nclass Items(db.Model):\n \"\"\" Model to store all the information about an item \"\"\"\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String)\n item = db.Column(db.String, nullable=False)\n description = db.Column(db.String, nullable=False)\n image = db.Column(db.String)\n category = db.Column(db.String, nullable=False)\n price = db.Column(db.String, nullable=False)\n\n def __init__(self, email, item, description, image, category, price):\n self.email = email\n self.item = item\n self.description = description\n self.image = image\n self.category = category\n self.price = price\n\n @property\n def serialize(self):\n \"\"\" Function to return a json object for each \n instance of the class Items \"\"\"\n return {'id': self.id, 'item': self.item, 'description': self.\n description, 'image': self.image, 'category': self.category,\n 'price': self.price}\n\n def __repr__(self):\n \"\"\" Functon to represent the class instance \"\"\"\n return '<item {}>'.format(self.item)\n",
"step-4": "<mask token>\nfrom catalog import db\n\n\nclass Items(db.Model):\n \"\"\" Model to store all the information about an item \"\"\"\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String)\n item = db.Column(db.String, nullable=False)\n description = db.Column(db.String, nullable=False)\n image = db.Column(db.String)\n category = db.Column(db.String, nullable=False)\n price = db.Column(db.String, nullable=False)\n\n def __init__(self, email, item, description, image, category, price):\n self.email = email\n self.item = item\n self.description = description\n self.image = image\n self.category = category\n self.price = price\n\n @property\n def serialize(self):\n \"\"\" Function to return a json object for each \n instance of the class Items \"\"\"\n return {'id': self.id, 'item': self.item, 'description': self.\n description, 'image': self.image, 'category': self.category,\n 'price': self.price}\n\n def __repr__(self):\n \"\"\" Functon to represent the class instance \"\"\"\n return '<item {}>'.format(self.item)\n",
"step-5": "''' This module creates the models/tables in the database \r\n catalog using sqlalchemy '''\r\n\r\nfrom catalog import db\r\n\r\n\r\nclass Items(db.Model):\r\n ''' Model to store all the information about an item '''\r\n\r\n id = db.Column(db.Integer, primary_key=True)\r\n email = db.Column(db.String)\r\n item = db.Column(db.String, nullable=False)\r\n description = db.Column(db.String, nullable=False)\r\n image = db.Column(db.String)\r\n category = db.Column(db.String, nullable=False)\r\n price = db.Column(db.String, nullable=False)\r\n\r\n\r\n def __init__(self, email, item, description, image, category, price):\r\n\r\n self.email = email\r\n self.item = item\r\n self.description = description\r\n self.image = image\r\n self.category = category\r\n self.price = price\r\n\r\n\r\n @property\r\n def serialize(self):\r\n ''' Function to return a json object for each \r\n instance of the class Items '''\r\n\r\n return { 'id': self.id,\r\n 'item': self.item,\r\n 'description': self.description,\r\n 'image': self.image,\r\n 'category': self.category,\r\n 'price': self.price }\r\n\r\n\r\n def __repr__(self):\r\n ''' Functon to represent the class instance '''\r\n\r\n return '<item {}>'.format(self.item)\r\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
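For context, the serialize property defined above is the usual hook for a JSON endpoint. A minimal sketch of how it might be consumed in a Flask view; the app object, the route path, the import path, and the assumption that db comes from Flask-SQLAlchemy (which gives Items a .query attribute) are all illustrative, not part of the original:

from flask import Flask, jsonify
from catalog.models import Items  # hypothetical import path for the class above

app = Flask(__name__)

@app.route('/catalog.json')
def catalog_json():
    # .query is supplied by Flask-SQLAlchemy's db.Model base class
    return jsonify(items=[item.serialize for item in Items.query.all()])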
import pickle
import time
start = time.time()
f = open('my_classifier.pickle', 'rb')
cl = pickle.load(f)
f.close()
print(cl.classify("Where to travel in bangalore ?"))
print(cl.classify("Name a golf course in Myrtle beach ."))
print(cl.classify("What body of water does the Danube River flow into ?"))
#print("Accuracy: {0}".format(cl.accuracy(test)))
print(time.time()-start)
|
normal
|
{
"blob_id": "82a3fca0261b4bde43f7bf258bb22e5b2ea8c28d",
"index": 5370,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nf.close()\nprint(cl.classify('Where to travel in bangalore ?'))\nprint(cl.classify('Name a golf course in Myrtle beach .'))\nprint(cl.classify('What body of water does the Danube River flow into ?'))\nprint(time.time() - start)\n",
"step-3": "<mask token>\nstart = time.time()\nf = open('my_classifier.pickle', 'rb')\ncl = pickle.load(f)\nf.close()\nprint(cl.classify('Where to travel in bangalore ?'))\nprint(cl.classify('Name a golf course in Myrtle beach .'))\nprint(cl.classify('What body of water does the Danube River flow into ?'))\nprint(time.time() - start)\n",
"step-4": "import pickle\nimport time\nstart = time.time()\nf = open('my_classifier.pickle', 'rb')\ncl = pickle.load(f)\nf.close()\nprint(cl.classify('Where to travel in bangalore ?'))\nprint(cl.classify('Name a golf course in Myrtle beach .'))\nprint(cl.classify('What body of water does the Danube River flow into ?'))\nprint(time.time() - start)\n",
"step-5": "import pickle\nimport time\n\nstart = time.time()\nf = open('my_classifier.pickle', 'rb')\ncl = pickle.load(f)\nf.close()\n\nprint(cl.classify(\"Where to travel in bangalore ?\"))\nprint(cl.classify(\"Name a golf course in Myrtle beach .\"))\nprint(cl.classify(\"What body of water does the Danube River flow into ?\"))\n#print(\"Accuracy: {0}\".format(cl.accuracy(test)))\nprint(time.time()-start)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
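The load above implies a matching save step somewhere upstream; a minimal sketch of it with only the standard library (trained_classifier is a placeholder for whatever object exposes .classify()):

import pickle

trained_classifier = {'kind': 'placeholder'}  # stands in for the real trained object

# Persist it so the loading snippet above can restore it without retraining
with open('my_classifier.pickle', 'wb') as f:
    pickle.dump(trained_classifier, f)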
<|reserved_special_token_0|>
<|reserved_special_token_1|>
lc_headers = {'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15'
, 'authority': 'leetcode.com'}
lc_all = 'https://leetcode.com/api/problems/all/'
lc_submissions = (
'https://leetcode.com/api/submissions/?offset=%(offset)s&limit=%(limit)s&lastkey=%(lastkey)s'
)
lc_graphql = 'https://leetcode.com/graphql'
query_string = """query questionData($titleSlug: String!) {
question(titleSlug: $titleSlug) {
questionId
questionFrontendId
boundTopicId
title
titleSlug
content
translatedTitle
translatedContent
isPaidOnly
difficulty
likes
dislikes
isLiked
similarQuestions
contributors {
username
profileUrl
avatarUrl
__typename
}
topicTags {
name
slug
translatedName
__typename
}
companyTagStats
codeSnippets {
lang
langSlug
code
__typename
}
stats
hints
solution {
id
canSeeDetail
paidOnly
__typename
}
status
sampleTestCase
metaData
judgerAvailable
judgeType
mysqlSchemas
enableRunCode
enableTestMode
enableDebugger
envInfo
libraryUrl
adminUrl
__typename
}
}
"""
md_template = """# [%(id)s] %(title)s (%(difficulty)s)
%(small_tags)s
:+1: %(likes)s :thumbsdown: %(dislikes)s
---
## My Submission
- Language: %(lang)s
- Runtime: %(runtime)s
- Completed time: %(time)s
```%(lang)s
%(code)s
```
## Content
%(contents)s
## Related Problems
%(related_problems)s
## What a(n) %(difficulty)s problem!
Among **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**. <br>
- Likes: %(likes)s
- Dislikes: %(dislikes)s
"""
related_template = (
'[%(related_title)s](%(link)s) (%(related_difficulty)s) <br>')
tag_template = (
'[s-%(color)s.svg)](%(URL)s) '
)
raw_md_template = """## [%(id)s] %(title)s (%(difficulty)s)
%(small_tags)s
👍 %(likes)s 👎 %(dislikes)s
---
## My Submission
- Language: %(lang)s
- Runtime: %(runtime)s
- Completed time: %(time)s
```%(lang)s
%(code)s
```
## Related Problems
%(related_problems)s
## What a(n) %(difficulty)s problem!
Among **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**.
- Likes: %(likes)s
- Dislikes: %(dislikes)s
"""
<|reserved_special_token_1|>
lc_headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15",
"authority": "leetcode.com",
}
lc_all = "https://leetcode.com/api/problems/all/"
lc_submissions = "https://leetcode.com/api/submissions/?offset=%(offset)s&limit=%(limit)s&lastkey=%(lastkey)s"
lc_graphql = "https://leetcode.com/graphql"
query_string = 'query questionData($titleSlug: String!) {\n question(titleSlug: $titleSlug) {\n questionId\n questionFrontendId\n boundTopicId\n title\n titleSlug\n content\n translatedTitle\n translatedContent\n isPaidOnly\n difficulty\n likes\n dislikes\n isLiked\n similarQuestions\n contributors {\n username\n profileUrl\n avatarUrl\n __typename\n }\n topicTags {\n name\n slug\n translatedName\n __typename\n }\n companyTagStats\n codeSnippets {\n lang\n langSlug\n code\n __typename\n }\n stats\n hints\n solution {\n id\n canSeeDetail\n paidOnly\n __typename\n }\n status\n sampleTestCase\n metaData\n judgerAvailable\n judgeType\n mysqlSchemas\n enableRunCode\n enableTestMode\n enableDebugger\n envInfo\n libraryUrl\n adminUrl\n __typename\n }\n}\n'
md_template = '''# [%(id)s] %(title)s (%(difficulty)s)
%(small_tags)s
:+1: %(likes)s :thumbsdown: %(dislikes)s
---
## My Submission
- Language: %(lang)s
- Runtime: %(runtime)s
- Completed time: %(time)s
```%(lang)s
%(code)s
```
## Content
%(contents)s
## Related Problems
%(related_problems)s
## What a(n) %(difficulty)s problem!
Among **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**. <br>
- Likes: %(likes)s
- Dislikes: %(dislikes)s
'''
related_template = "[%(related_title)s](%(link)s) (%(related_difficulty)s) <br>"
tag_template = "[s-%(color)s.svg)](%(URL)s) "
raw_md_template = '''## [%(id)s] %(title)s (%(difficulty)s)
%(small_tags)s
👍 %(likes)s 👎 %(dislikes)s
---
## My Submission
- Language: %(lang)s
- Runtime: %(runtime)s
- Completed time: %(time)s
```%(lang)s
%(code)s
```
## Related Problems
%(related_problems)s
## What a(n) %(difficulty)s problem!
Among **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**.
- Likes: %(likes)s
- Dislikes: %(dislikes)s
'''
|
flexible
|
{
"blob_id": "f715628da2f1b950b8fbf8aa5b033e5299d3e224",
"index": 7857,
"step-1": "<mask token>\n",
"step-2": "lc_headers = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15'\n , 'authority': 'leetcode.com'}\nlc_all = 'https://leetcode.com/api/problems/all/'\nlc_submissions = (\n 'https://leetcode.com/api/submissions/?offset=%(offset)s&limit=%(limit)s&lastkey=%(lastkey)s'\n )\nlc_graphql = 'https://leetcode.com/graphql'\nquery_string = \"\"\"query questionData($titleSlug: String!) {\n question(titleSlug: $titleSlug) {\n questionId\n questionFrontendId\n boundTopicId\n title\n titleSlug\n content\n translatedTitle\n translatedContent\n isPaidOnly\n difficulty\n likes\n dislikes\n isLiked\n similarQuestions\n contributors {\n username\n profileUrl\n avatarUrl\n __typename\n }\n topicTags {\n name\n slug\n translatedName\n __typename\n }\n companyTagStats\n codeSnippets {\n lang\n langSlug\n code\n __typename\n }\n stats\n hints\n solution {\n id\n canSeeDetail\n paidOnly\n __typename\n }\n status\n sampleTestCase\n metaData\n judgerAvailable\n judgeType\n mysqlSchemas\n enableRunCode\n enableTestMode\n enableDebugger\n envInfo\n libraryUrl\n adminUrl\n __typename\n }\n}\n\"\"\"\nmd_template = \"\"\"# [%(id)s] %(title)s (%(difficulty)s)\n\n%(small_tags)s\n\n:+1: %(likes)s :thumbsdown: %(dislikes)s\n\n---\n\n## My Submission\n\n- Language: %(lang)s\n- Runtime: %(runtime)s\n- Completed time: %(time)s\n\n```%(lang)s\n%(code)s\n```\n\n## Content\n%(contents)s\n\n## Related Problems\n%(related_problems)s\n\n## What a(n) %(difficulty)s problem!\nAmong **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**. <br>\n\n- Likes: %(likes)s\n- Dislikes: %(dislikes)s\n\n\"\"\"\nrelated_template = (\n '[%(related_title)s](%(link)s) (%(related_difficulty)s) <br>')\ntag_template = (\n '[s-%(color)s.svg)](%(URL)s) '\n )\nraw_md_template = \"\"\"## [%(id)s] %(title)s (%(difficulty)s)\n\n%(small_tags)s\n\n👍 %(likes)s 👎 %(dislikes)s\n\n---\n\n## My Submission\n\n- Language: %(lang)s\n- Runtime: %(runtime)s\n- Completed time: %(time)s\n\n```%(lang)s\n%(code)s\n```\n\n## Related Problems\n%(related_problems)s\n\n## What a(n) %(difficulty)s problem!\nAmong **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**.\n\n- Likes: %(likes)s\n- Dislikes: %(dislikes)s\n\n\"\"\"\n",
"step-3": "lc_headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15\",\n \"authority\": \"leetcode.com\",\n }\n\nlc_all = \"https://leetcode.com/api/problems/all/\"\nlc_submissions = \"https://leetcode.com/api/submissions/?offset=%(offset)s&limit=%(limit)s&lastkey=%(lastkey)s\"\nlc_graphql = \"https://leetcode.com/graphql\"\n\nquery_string = 'query questionData($titleSlug: String!) {\\n question(titleSlug: $titleSlug) {\\n questionId\\n questionFrontendId\\n boundTopicId\\n title\\n titleSlug\\n content\\n translatedTitle\\n translatedContent\\n isPaidOnly\\n difficulty\\n likes\\n dislikes\\n isLiked\\n similarQuestions\\n contributors {\\n username\\n profileUrl\\n avatarUrl\\n __typename\\n }\\n topicTags {\\n name\\n slug\\n translatedName\\n __typename\\n }\\n companyTagStats\\n codeSnippets {\\n lang\\n langSlug\\n code\\n __typename\\n }\\n stats\\n hints\\n solution {\\n id\\n canSeeDetail\\n paidOnly\\n __typename\\n }\\n status\\n sampleTestCase\\n metaData\\n judgerAvailable\\n judgeType\\n mysqlSchemas\\n enableRunCode\\n enableTestMode\\n enableDebugger\\n envInfo\\n libraryUrl\\n adminUrl\\n __typename\\n }\\n}\\n'\n\nmd_template = '''# [%(id)s] %(title)s (%(difficulty)s)\n\n%(small_tags)s\n\n:+1: %(likes)s :thumbsdown: %(dislikes)s\n\n---\n\n## My Submission\n\n- Language: %(lang)s\n- Runtime: %(runtime)s\n- Completed time: %(time)s\n\n```%(lang)s\n%(code)s\n```\n\n## Content\n%(contents)s\n\n## Related Problems\n%(related_problems)s\n\n## What a(n) %(difficulty)s problem!\nAmong **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**. <br>\n\n- Likes: %(likes)s\n- Dislikes: %(dislikes)s\n\n'''\n\nrelated_template = \"[%(related_title)s](%(link)s) (%(related_difficulty)s) <br>\"\n\ntag_template = \"[s-%(color)s.svg)](%(URL)s) \"\n\nraw_md_template = '''## [%(id)s] %(title)s (%(difficulty)s)\n\n%(small_tags)s\n\n👍 %(likes)s 👎 %(dislikes)s\n\n---\n\n## My Submission\n\n- Language: %(lang)s\n- Runtime: %(runtime)s\n- Completed time: %(time)s\n\n```%(lang)s\n%(code)s\n```\n\n## Related Problems\n%(related_problems)s\n\n## What a(n) %(difficulty)s problem!\nAmong **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**.\n\n- Likes: %(likes)s\n- Dislikes: %(dislikes)s\n\n'''\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
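All the templates above use Python's %-style mapping substitution, so every %(name)s placeholder must appear as a key in the dict passed in. A small illustration with related_template:

related_template = '[%(related_title)s](%(link)s) (%(related_difficulty)s) <br>'

line = related_template % {
    'related_title': 'Two Sum',
    'link': 'https://leetcode.com/problems/two-sum/',
    'related_difficulty': 'Easy',
}
print(line)  # [Two Sum](https://leetcode.com/problems/two-sum/) (Easy) <br>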
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(inspect_tables) == 0:
for k, t in enumerate(tickers):
ticker_data = pd.DataFrame()
try:
ticker_data = wb.DataReader(t, data_source='yahoo', start=start_1)
ticker_data.to_sql(tablenames[k], con=engine, if_exists='replace')
except:
print('New Import from {} went wrong'.format(t))
else:
print('New Import from {} is done'.format(t))
else:
for k, t in enumerate(tickers):
if tablenames[k] not in inspect_tables:
ticker_data = pd.DataFrame()
try:
ticker_data = wb.DataReader(t, data_source='yahoo', start=
start_1)
ticker_data.to_sql(tablenames[k], con=engine, if_exists=
'replace')
except:
print('New Import from {} went wrong'.format(t))
else:
print('New Import from {} is done'.format(t))
else:
ticker_data = pd.DataFrame()
check_last_value = pd.read_sql_query(Select_last_value.format(
tablenames[k]), con=engine)
check_last_value2 = pd.to_datetime(check_last_value['Date'][0],
format='%Y-%m-%d').date()
if check_last_value2 != date_today:
try:
ticker_data = wb.DataReader(t, data_source='yahoo',
start=check_last_value2 + dt.timedelta(days=1))
ticker_data.to_sql(tablenames[k], con=engine, if_exists
='append')
except:
print('Update Import from {} went wrong'.format(t))
else:
print('Update Import from {} is done from {}'.format(t,
str(check_last_value2 + dt.timedelta(days=1))))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
conn = sqlite3.connect('Portfolio_dividens.db')
c = conn.cursor()
<|reserved_special_token_0|>
engine = create_engine('sqlite:///Portfolio_dividens.db')
<|reserved_special_token_0|>
path = os.getcwd()
<|reserved_special_token_0|>
date_today = dt.date.today()
start_1 = '2005-1-1'
tickers_df = pd.read_excel(path + '\\Tickers.xlsx')
tickers = list(tickers_df['Ticker'])
tablenames = list(tickers_df['tablenames'])
<|reserved_special_token_0|>
inspector = inspect(engine)
inspect_tables = inspector.get_table_names()
Select_last_value = "Select Date from '{}' order by date desc limit 1;"
Check_table_exists = 'SHOW TABLES LIKE {}'
if len(inspect_tables) == 0:
for k, t in enumerate(tickers):
ticker_data = pd.DataFrame()
try:
ticker_data = wb.DataReader(t, data_source='yahoo', start=start_1)
ticker_data.to_sql(tablenames[k], con=engine, if_exists='replace')
except:
print('New Import from {} went wrong'.format(t))
else:
print('New Import from {} is done'.format(t))
else:
for k, t in enumerate(tickers):
if tablenames[k] not in inspect_tables:
ticker_data = pd.DataFrame()
try:
ticker_data = wb.DataReader(t, data_source='yahoo', start=
start_1)
ticker_data.to_sql(tablenames[k], con=engine, if_exists=
'replace')
except:
print('New Import from {} went wrong'.format(t))
else:
print('New Import from {} is done'.format(t))
else:
ticker_data = pd.DataFrame()
check_last_value = pd.read_sql_query(Select_last_value.format(
tablenames[k]), con=engine)
check_last_value2 = pd.to_datetime(check_last_value['Date'][0],
format='%Y-%m-%d').date()
if check_last_value2 != date_today:
try:
ticker_data = wb.DataReader(t, data_source='yahoo',
start=check_last_value2 + dt.timedelta(days=1))
ticker_data.to_sql(tablenames[k], con=engine, if_exists
='append')
except:
print('Update Import from {} went wrong'.format(t))
else:
print('Update Import from {} is done from {}'.format(t,
str(check_last_value2 + dt.timedelta(days=1))))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sqlite3
conn = sqlite3.connect('Portfolio_dividens.db')
c = conn.cursor()
from sqlalchemy import create_engine
engine = create_engine('sqlite:///Portfolio_dividens.db')
import os
path = os.getcwd()
<|reserved_special_token_0|>
import pandas as pd
from pandas_datareader import data as wb
import datetime as dt
date_today = dt.date.today()
start_1 = '2005-1-1'
tickers_df = pd.read_excel(path + '\\Tickers.xlsx')
tickers = list(tickers_df['Ticker'])
tablenames = list(tickers_df['tablenames'])
from sqlalchemy import inspect
inspector = inspect(engine)
inspect_tables = inspector.get_table_names()
Select_last_value = "Select Date from '{}' order by date desc limit 1;"
Check_table_exists = 'SHOW TABLES LIKE {}'
if len(inspect_tables) == 0:
for k, t in enumerate(tickers):
ticker_data = pd.DataFrame()
try:
ticker_data = wb.DataReader(t, data_source='yahoo', start=start_1)
ticker_data.to_sql(tablenames[k], con=engine, if_exists='replace')
except:
print('New Import from {} went wrong'.format(t))
else:
print('New Import from {} is done'.format(t))
else:
for k, t in enumerate(tickers):
if tablenames[k] not in inspect_tables:
ticker_data = pd.DataFrame()
try:
ticker_data = wb.DataReader(t, data_source='yahoo', start=
start_1)
ticker_data.to_sql(tablenames[k], con=engine, if_exists=
'replace')
except:
print('New Import from {} went wrong'.format(t))
else:
print('New Import from {} is done'.format(t))
else:
ticker_data = pd.DataFrame()
check_last_value = pd.read_sql_query(Select_last_value.format(
tablenames[k]), con=engine)
check_last_value2 = pd.to_datetime(check_last_value['Date'][0],
format='%Y-%m-%d').date()
if check_last_value2 != date_today:
try:
ticker_data = wb.DataReader(t, data_source='yahoo',
start=check_last_value2 + dt.timedelta(days=1))
ticker_data.to_sql(tablenames[k], con=engine, if_exists
='append')
except:
print('Update Import from {} went wrong'.format(t))
else:
print('Update Import from {} is done from {}'.format(t,
str(check_last_value2 + dt.timedelta(days=1))))
<|reserved_special_token_1|>
"""
Created on Wed Nov 6 13:03:42 2019
@author: antonio.blago
"""
#%% Connect to database
import sqlite3
conn = sqlite3.connect('Portfolio_dividens.db')
c = conn.cursor()
from sqlalchemy import create_engine  # supports writing a pd.DataFrame to a SQL table
#import mysqlclient
engine = create_engine("sqlite:///Portfolio_dividens.db")
#%% Set up path
import os
# detect the current working directory
path = os.getcwd()
#%%
''' Yahoo finance'''
import pandas as pd
from pandas_datareader import data as wb
import datetime as dt
date_today=dt.date.today()
start_1='2005-1-1'
tickers_df=pd.read_excel(path+r'\Tickers.xlsx')
tickers=list(tickers_df['Ticker'])
tablenames=list(tickers_df['tablenames'])
from sqlalchemy import inspect
inspector = inspect(engine)
# Get table information
inspect_tables=inspector.get_table_names()
Select_last_value= "Select Date from '{}' order by date desc limit 1;"
Check_table_exists="SHOW TABLES LIKE {}"
#pd.read_sql_query(Select_last_value,con=engine)
if len(inspect_tables)==0: #first initialize db
for k, t in enumerate(tickers):
ticker_data=pd.DataFrame()
try:
ticker_data=wb.DataReader(t, data_source='yahoo',start=start_1)
ticker_data.to_sql(tablenames[k],con=engine,if_exists="replace" )
except:
print("New Import from {} went wrong".format(t))
else:
print("New Import from {} is done".format(t))
else:
for k, t in enumerate(tickers):
if tablenames[k] not in inspect_tables: #check table is existing
ticker_data=pd.DataFrame()
try:
ticker_data=wb.DataReader(t, data_source='yahoo',start=start_1)
ticker_data.to_sql(tablenames[k],con=engine,if_exists="replace" )
except:
print("New Import from {} went wrong".format(t))
else:
print("New Import from {} is done".format(t))
else:
ticker_data=pd.DataFrame()
check_last_value=pd.read_sql_query(Select_last_value.format(tablenames[k]),con=engine)
check_last_value2=(pd.to_datetime(check_last_value['Date'][0],format="%Y-%m-%d")).date()
if check_last_value2!=date_today: #dt.datetime.strptime("2019-11-13", "%Y-%m-%d")==pd.to_datetime(check_last_value['Date'][0])
try:
ticker_data=wb.DataReader(t, data_source='yahoo',start=check_last_value2+dt.timedelta(days=1))
ticker_data.to_sql(tablenames[k],con=engine,if_exists="append")
except:
print("Update Import from {} went wrong".format(t))
else:
print("Update Import from {} is done from {}".format(t,str(check_last_value2+dt.timedelta(days=1))))
|
flexible
|
{
"blob_id": "cee77a97503cca517d03ce7cce189974da282a03",
"index": 2500,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(inspect_tables) == 0:\n for k, t in enumerate(tickers):\n ticker_data = pd.DataFrame()\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo', start=start_1)\n ticker_data.to_sql(tablenames[k], con=engine, if_exists='replace')\n except:\n print('New Import from {} went wrong'.format(t))\n else:\n print('New Import from {} is done'.format(t))\nelse:\n for k, t in enumerate(tickers):\n if tablenames[k] not in inspect_tables:\n ticker_data = pd.DataFrame()\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo', start=\n start_1)\n ticker_data.to_sql(tablenames[k], con=engine, if_exists=\n 'replace')\n except:\n print('New Import from {} went wrong'.format(t))\n else:\n print('New Import from {} is done'.format(t))\n else:\n ticker_data = pd.DataFrame()\n check_last_value = pd.read_sql_query(Select_last_value.format(\n tablenames[k]), con=engine)\n check_last_value2 = pd.to_datetime(check_last_value['Date'][0],\n format='%Y-%m-%d').date()\n if check_last_value2 != date_today:\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo',\n start=check_last_value2 + dt.timedelta(days=1))\n ticker_data.to_sql(tablenames[k], con=engine, if_exists\n ='append')\n except:\n print('Update Import from {} went wrong'.format(t))\n else:\n print('Update Import from {} is done from {}'.format(t,\n str(check_last_value2 + dt.timedelta(days=1))))\n",
"step-3": "<mask token>\nconn = sqlite3.connect('Portfolio_dividens.db')\nc = conn.cursor()\n<mask token>\nengine = create_engine('sqlite:///Portfolio_dividens.db')\n<mask token>\npath = os.getcwd()\n<mask token>\ndate_today = dt.date.today()\nstart_1 = '2005-1-1'\ntickers_df = pd.read_excel(path + '\\\\Tickers.xlsx')\ntickers = list(tickers_df['Ticker'])\ntablenames = list(tickers_df['tablenames'])\n<mask token>\ninspector = inspect(engine)\ninspect_tables = inspector.get_table_names()\nSelect_last_value = \"Select Date from '{}' order by date desc limit 1;\"\nCheck_table_exists = 'SHOW TABLES LIKE {}'\nif len(inspect_tables) == 0:\n for k, t in enumerate(tickers):\n ticker_data = pd.DataFrame()\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo', start=start_1)\n ticker_data.to_sql(tablenames[k], con=engine, if_exists='replace')\n except:\n print('New Import from {} went wrong'.format(t))\n else:\n print('New Import from {} is done'.format(t))\nelse:\n for k, t in enumerate(tickers):\n if tablenames[k] not in inspect_tables:\n ticker_data = pd.DataFrame()\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo', start=\n start_1)\n ticker_data.to_sql(tablenames[k], con=engine, if_exists=\n 'replace')\n except:\n print('New Import from {} went wrong'.format(t))\n else:\n print('New Import from {} is done'.format(t))\n else:\n ticker_data = pd.DataFrame()\n check_last_value = pd.read_sql_query(Select_last_value.format(\n tablenames[k]), con=engine)\n check_last_value2 = pd.to_datetime(check_last_value['Date'][0],\n format='%Y-%m-%d').date()\n if check_last_value2 != date_today:\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo',\n start=check_last_value2 + dt.timedelta(days=1))\n ticker_data.to_sql(tablenames[k], con=engine, if_exists\n ='append')\n except:\n print('Update Import from {} went wrong'.format(t))\n else:\n print('Update Import from {} is done from {}'.format(t,\n str(check_last_value2 + dt.timedelta(days=1))))\n",
"step-4": "<mask token>\nimport sqlite3\nconn = sqlite3.connect('Portfolio_dividens.db')\nc = conn.cursor()\nfrom sqlalchemy import create_engine\nengine = create_engine('sqlite:///Portfolio_dividens.db')\nimport os\npath = os.getcwd()\n<mask token>\nimport pandas as pd\nfrom pandas_datareader import data as wb\nimport datetime as dt\ndate_today = dt.date.today()\nstart_1 = '2005-1-1'\ntickers_df = pd.read_excel(path + '\\\\Tickers.xlsx')\ntickers = list(tickers_df['Ticker'])\ntablenames = list(tickers_df['tablenames'])\nfrom sqlalchemy import inspect\ninspector = inspect(engine)\ninspect_tables = inspector.get_table_names()\nSelect_last_value = \"Select Date from '{}' order by date desc limit 1;\"\nCheck_table_exists = 'SHOW TABLES LIKE {}'\nif len(inspect_tables) == 0:\n for k, t in enumerate(tickers):\n ticker_data = pd.DataFrame()\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo', start=start_1)\n ticker_data.to_sql(tablenames[k], con=engine, if_exists='replace')\n except:\n print('New Import from {} went wrong'.format(t))\n else:\n print('New Import from {} is done'.format(t))\nelse:\n for k, t in enumerate(tickers):\n if tablenames[k] not in inspect_tables:\n ticker_data = pd.DataFrame()\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo', start=\n start_1)\n ticker_data.to_sql(tablenames[k], con=engine, if_exists=\n 'replace')\n except:\n print('New Import from {} went wrong'.format(t))\n else:\n print('New Import from {} is done'.format(t))\n else:\n ticker_data = pd.DataFrame()\n check_last_value = pd.read_sql_query(Select_last_value.format(\n tablenames[k]), con=engine)\n check_last_value2 = pd.to_datetime(check_last_value['Date'][0],\n format='%Y-%m-%d').date()\n if check_last_value2 != date_today:\n try:\n ticker_data = wb.DataReader(t, data_source='yahoo',\n start=check_last_value2 + dt.timedelta(days=1))\n ticker_data.to_sql(tablenames[k], con=engine, if_exists\n ='append')\n except:\n print('Update Import from {} went wrong'.format(t))\n else:\n print('Update Import from {} is done from {}'.format(t,\n str(check_last_value2 + dt.timedelta(days=1))))\n",
"step-5": "\"\"\"\r\nCreated on Wed Nov 6 13:03:42 2019\r\n\r\n@author: antonio.blago\r\n\"\"\"\r\n#%% Connect to database\r\n\r\nimport sqlite3\r\nconn = sqlite3.connect('Portfolio_dividens.db')\r\nc = conn.cursor()\r\n\r\nfrom sqlalchemy import create_engine #suport pd.dataframe to sql table\r\n#import mysqlclient\r\n\r\nengine = create_engine(\"sqlite:///Portfolio_dividens.db\")\r\n\r\n#%% Set up path\r\n\r\nimport os\r\n# detect the current working directory and print it\r\npath = os.getcwd()\r\n\r\n#%%\r\n''' Yahoo finance'''\r\nimport pandas as pd\r\nfrom pandas_datareader import data as wb\r\nimport datetime as dt\r\n\r\ndate_today=dt.date.today()\r\n\r\nstart_1='2005-1-1'\r\n\r\ntickers_df=pd.read_excel(path+r'\\Tickers.xlsx')\r\n\r\ntickers=list(tickers_df['Ticker'])\r\n\r\ntablenames=list(tickers_df['tablenames'])\r\n\r\n\r\nfrom sqlalchemy import inspect\r\n\r\n\r\ninspector = inspect(engine)\r\n\r\n# Get table information\r\ninspect_tables=inspector.get_table_names()\r\n\r\n\r\nSelect_last_value= \"Select Date from '{}' order by date desc limit 1;\"\r\n\r\nCheck_table_exists=\"SHOW TABLES LIKE {}\"\r\n\r\n#pd.read_sql_query(Select_last_value,con=engine)\r\n\r\nif len(inspect_tables)==0: #first initialize db\r\n \r\n for k, t in enumerate(tickers):\r\n \r\n ticker_data=pd.DataFrame()\r\n try:\r\n ticker_data=wb.DataReader(t, data_source='yahoo',start=start_1)\r\n ticker_data.to_sql(tablenames[k],con=engine,if_exists=\"replace\" )\r\n \r\n except:\r\n print(\"New Import from {} went wrong\".format(t))\r\n else:\r\n print(\"New Import from {} is done\".format(t))\r\n \r\nelse:\r\n \r\n for k, t in enumerate(tickers):\r\n \r\n if tablenames[k] not in inspect_tables: #check table is existing\r\n ticker_data=pd.DataFrame()\r\n try:\r\n ticker_data=wb.DataReader(t, data_source='yahoo',start=start_1)\r\n ticker_data.to_sql(tablenames[k],con=engine,if_exists=\"replace\" )\r\n \r\n except:\r\n print(\"New Import from {} went wrong\".format(t))\r\n else:\r\n print(\"New Import from {} is done\".format(t))\r\n \r\n \r\n else: \r\n \r\n \r\n ticker_data=pd.DataFrame()\r\n \r\n check_last_value=pd.read_sql_query(Select_last_value.format(tablenames[k]),con=engine)\r\n check_last_value2=(pd.to_datetime(check_last_value['Date'][0],format=\"%Y-%m-%d\")).date()\r\n \r\n if check_last_value2!=date_today: #dt.datetime.strptime(\"2019-11-13\", \"%Y-%m-%d\")==pd.to_datetime(check_last_value['Date'][0])\r\n \r\n try:\r\n ticker_data=wb.DataReader(t, data_source='yahoo',start=check_last_value2+dt.timedelta(days=1))\r\n ticker_data.to_sql(tablenames[k],con=engine,if_exists=\"append\")\r\n \r\n except:\r\n print(\"Update Import from {} went wrong\".format(t))\r\n else:\r\n print(\"Update Import from {} is done from {}\".format(t,str(check_last_value2+dt.timedelta(days=1))))\r\n \r\n \r\n \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
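Once the updater above has run, each ticker's history sits in its own table and can be pulled back into pandas. A minimal sketch; the table name 'apple' is a placeholder for whatever tablenames holds in Tickers.xlsx:

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('sqlite:///Portfolio_dividens.db')

# Load one ticker's full price history, indexed by date
prices = pd.read_sql_query("SELECT * FROM 'apple' ORDER BY Date", con=engine,
                           parse_dates=['Date'], index_col='Date')
print(prices.tail())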
#
# PySNMP MIB module SYSLOG-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SYSLOG-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:31:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, ObjectIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Bits, Gauge32, MibIdentifier, iso, ModuleIdentity, NotificationType, Counter32, Counter64, IpAddress, mib_2 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "ObjectIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Bits", "Gauge32", "MibIdentifier", "iso", "ModuleIdentity", "NotificationType", "Counter32", "Counter64", "IpAddress", "mib-2")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
syslogTCMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 173))
syslogTCMIB.setRevisions(('2009-03-30 00:00',))
if mibBuilder.loadTexts: syslogTCMIB.setLastUpdated('200903300000Z')
if mibBuilder.loadTexts: syslogTCMIB.setOrganization('IETF Syslog Working Group')
class SyslogFacility(TextualConvention, Integer32):
reference = 'The Syslog Protocol (RFC5424): Table 1'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))
namedValues = NamedValues(("kern", 0), ("user", 1), ("mail", 2), ("daemon", 3), ("auth", 4), ("syslog", 5), ("lpr", 6), ("news", 7), ("uucp", 8), ("cron", 9), ("authpriv", 10), ("ftp", 11), ("ntp", 12), ("audit", 13), ("console", 14), ("cron2", 15), ("local0", 16), ("local1", 17), ("local2", 18), ("local3", 19), ("local4", 20), ("local5", 21), ("local6", 22), ("local7", 23))
class SyslogSeverity(TextualConvention, Integer32):
reference = 'The Syslog Protocol (RFC5424): Table 2'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("emerg", 0), ("alert", 1), ("crit", 2), ("err", 3), ("warning", 4), ("notice", 5), ("info", 6), ("debug", 7))
mibBuilder.exportSymbols("SYSLOG-TC-MIB", syslogTCMIB=syslogTCMIB, SyslogFacility=SyslogFacility, PYSNMP_MODULE_ID=syslogTCMIB, SyslogSeverity=SyslogSeverity)
|
normal
|
{
"blob_id": "46cdea08cab620ea099ad7fa200782717249b91b",
"index": 6741,
"step-1": "<mask token>\n\n\nclass SyslogSeverity(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 2'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(\n SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))\n namedValues = NamedValues(('emerg', 0), ('alert', 1), ('crit', 2), (\n 'err', 3), ('warning', 4), ('notice', 5), ('info', 6), ('debug', 7))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SyslogFacility(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 1'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(\n SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,\n 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))\n namedValues = NamedValues(('kern', 0), ('user', 1), ('mail', 2), (\n 'daemon', 3), ('auth', 4), ('syslog', 5), ('lpr', 6), ('news', 7),\n ('uucp', 8), ('cron', 9), ('authpriv', 10), ('ftp', 11), ('ntp', 12\n ), ('audit', 13), ('console', 14), ('cron2', 15), ('local0', 16), (\n 'local1', 17), ('local2', 18), ('local3', 19), ('local4', 20), (\n 'local5', 21), ('local6', 22), ('local7', 23))\n\n\nclass SyslogSeverity(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 2'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(\n SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))\n namedValues = NamedValues(('emerg', 0), ('alert', 1), ('crit', 2), (\n 'err', 3), ('warning', 4), ('notice', 5), ('info', 6), ('debug', 7))\n\n\n<mask token>\n",
"step-3": "<mask token>\nsyslogTCMIB.setRevisions(('2009-03-30 00:00',))\nif mibBuilder.loadTexts:\n syslogTCMIB.setLastUpdated('200903300000Z')\nif mibBuilder.loadTexts:\n syslogTCMIB.setOrganization('IETF Syslog Working Group')\n\n\nclass SyslogFacility(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 1'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(\n SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,\n 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))\n namedValues = NamedValues(('kern', 0), ('user', 1), ('mail', 2), (\n 'daemon', 3), ('auth', 4), ('syslog', 5), ('lpr', 6), ('news', 7),\n ('uucp', 8), ('cron', 9), ('authpriv', 10), ('ftp', 11), ('ntp', 12\n ), ('audit', 13), ('console', 14), ('cron2', 15), ('local0', 16), (\n 'local1', 17), ('local2', 18), ('local3', 19), ('local4', 20), (\n 'local5', 21), ('local6', 22), ('local7', 23))\n\n\nclass SyslogSeverity(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 2'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(\n SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))\n namedValues = NamedValues(('emerg', 0), ('alert', 1), ('crit', 2), (\n 'err', 3), ('warning', 4), ('notice', 5), ('info', 6), ('debug', 7))\n\n\nmibBuilder.exportSymbols('SYSLOG-TC-MIB', syslogTCMIB=syslogTCMIB,\n SyslogFacility=SyslogFacility, PYSNMP_MODULE_ID=syslogTCMIB,\n SyslogSeverity=SyslogSeverity)\n",
"step-4": "Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols('ASN1',\n 'Integer', 'ObjectIdentifier', 'OctetString')\nNamedValues, = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')\n(SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint,\n ValueRangeConstraint, ConstraintsIntersection) = (mibBuilder.\n importSymbols('ASN1-REFINEMENT', 'SingleValueConstraint',\n 'ConstraintsUnion', 'ValueSizeConstraint', 'ValueRangeConstraint',\n 'ConstraintsIntersection'))\nNotificationGroup, ModuleCompliance = mibBuilder.importSymbols('SNMPv2-CONF',\n 'NotificationGroup', 'ModuleCompliance')\n(TimeTicks, ObjectIdentity, Integer32, MibScalar, MibTable, MibTableRow,\n MibTableColumn, Unsigned32, Bits, Gauge32, MibIdentifier, iso,\n ModuleIdentity, NotificationType, Counter32, Counter64, IpAddress, mib_2\n ) = (mibBuilder.importSymbols('SNMPv2-SMI', 'TimeTicks',\n 'ObjectIdentity', 'Integer32', 'MibScalar', 'MibTable', 'MibTableRow',\n 'MibTableColumn', 'Unsigned32', 'Bits', 'Gauge32', 'MibIdentifier',\n 'iso', 'ModuleIdentity', 'NotificationType', 'Counter32', 'Counter64',\n 'IpAddress', 'mib-2'))\nTextualConvention, DisplayString = mibBuilder.importSymbols('SNMPv2-TC',\n 'TextualConvention', 'DisplayString')\nsyslogTCMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 173))\nsyslogTCMIB.setRevisions(('2009-03-30 00:00',))\nif mibBuilder.loadTexts:\n syslogTCMIB.setLastUpdated('200903300000Z')\nif mibBuilder.loadTexts:\n syslogTCMIB.setOrganization('IETF Syslog Working Group')\n\n\nclass SyslogFacility(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 1'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(\n SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,\n 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))\n namedValues = NamedValues(('kern', 0), ('user', 1), ('mail', 2), (\n 'daemon', 3), ('auth', 4), ('syslog', 5), ('lpr', 6), ('news', 7),\n ('uucp', 8), ('cron', 9), ('authpriv', 10), ('ftp', 11), ('ntp', 12\n ), ('audit', 13), ('console', 14), ('cron2', 15), ('local0', 16), (\n 'local1', 17), ('local2', 18), ('local3', 19), ('local4', 20), (\n 'local5', 21), ('local6', 22), ('local7', 23))\n\n\nclass SyslogSeverity(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 2'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(\n SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))\n namedValues = NamedValues(('emerg', 0), ('alert', 1), ('crit', 2), (\n 'err', 3), ('warning', 4), ('notice', 5), ('info', 6), ('debug', 7))\n\n\nmibBuilder.exportSymbols('SYSLOG-TC-MIB', syslogTCMIB=syslogTCMIB,\n SyslogFacility=SyslogFacility, PYSNMP_MODULE_ID=syslogTCMIB,\n SyslogSeverity=SyslogSeverity)\n",
"step-5": "#\n# PySNMP MIB module SYSLOG-TC-MIB (http://snmplabs.com/pysmi)\n# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SYSLOG-TC-MIB\n# Produced by pysmi-0.3.4 at Mon Apr 29 20:31:53 2019\n# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4\n# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) \n#\nInteger, ObjectIdentifier, OctetString = mibBuilder.importSymbols(\"ASN1\", \"Integer\", \"ObjectIdentifier\", \"OctetString\")\nNamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\")\nSingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"SingleValueConstraint\", \"ConstraintsUnion\", \"ValueSizeConstraint\", \"ValueRangeConstraint\", \"ConstraintsIntersection\")\nNotificationGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ModuleCompliance\")\nTimeTicks, ObjectIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Bits, Gauge32, MibIdentifier, iso, ModuleIdentity, NotificationType, Counter32, Counter64, IpAddress, mib_2 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"TimeTicks\", \"ObjectIdentity\", \"Integer32\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Unsigned32\", \"Bits\", \"Gauge32\", \"MibIdentifier\", \"iso\", \"ModuleIdentity\", \"NotificationType\", \"Counter32\", \"Counter64\", \"IpAddress\", \"mib-2\")\nTextualConvention, DisplayString = mibBuilder.importSymbols(\"SNMPv2-TC\", \"TextualConvention\", \"DisplayString\")\nsyslogTCMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 173))\nsyslogTCMIB.setRevisions(('2009-03-30 00:00',))\nif mibBuilder.loadTexts: syslogTCMIB.setLastUpdated('200903300000Z')\nif mibBuilder.loadTexts: syslogTCMIB.setOrganization('IETF Syslog Working Group')\nclass SyslogFacility(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 1'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))\n namedValues = NamedValues((\"kern\", 0), (\"user\", 1), (\"mail\", 2), (\"daemon\", 3), (\"auth\", 4), (\"syslog\", 5), (\"lpr\", 6), (\"news\", 7), (\"uucp\", 8), (\"cron\", 9), (\"authpriv\", 10), (\"ftp\", 11), (\"ntp\", 12), (\"audit\", 13), (\"console\", 14), (\"cron2\", 15), (\"local0\", 16), (\"local1\", 17), (\"local2\", 18), (\"local3\", 19), (\"local4\", 20), (\"local5\", 21), (\"local6\", 22), (\"local7\", 23))\n\nclass SyslogSeverity(TextualConvention, Integer32):\n reference = 'The Syslog Protocol (RFC5424): Table 2'\n status = 'current'\n subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))\n namedValues = NamedValues((\"emerg\", 0), (\"alert\", 1), (\"crit\", 2), (\"err\", 3), (\"warning\", 4), (\"notice\", 5), (\"info\", 6), (\"debug\", 7))\n\nmibBuilder.exportSymbols(\"SYSLOG-TC-MIB\", syslogTCMIB=syslogTCMIB, SyslogFacility=SyslogFacility, PYSNMP_MODULE_ID=syslogTCMIB, SyslogSeverity=SyslogSeverity)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
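Background: the two textual conventions above mirror RFC 5424, where a message's PRI field packs facility and severity into one number. A small plain-Python illustration:

# RFC 5424: PRI = facility * 8 + severity
facility = 4   # auth, from the SyslogFacility values above
severity = 3   # err, from the SyslogSeverity values above

pri = facility * 8 + severity
print('<%d>' % pri)  # <35> -- the priority prefix of a syslog message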
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('training_area', '0002_event')]
operations = [migrations.AddField(model_name='event', name='athlete',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.
models.deletion.CASCADE, related_name='athlete_calendar', to=
'training_area.Athlete')), migrations.AddField(model_name='event',
name='coach', field=models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.CASCADE, related_name=
'coach_calendar', to='training_area.Coach'))]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('training_area', '0002_event')]
operations = [migrations.AddField(model_name='event', name='athlete',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.
models.deletion.CASCADE, related_name='athlete_calendar', to=
'training_area.Athlete')), migrations.AddField(model_name='event',
name='coach', field=models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.CASCADE, related_name=
'coach_calendar', to='training_area.Coach'))]
<|reserved_special_token_1|>
# Generated by Django 2.1.7 on 2019-03-14 07:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('training_area', '0002_event'),
]
operations = [
migrations.AddField(
model_name='event',
name='athlete',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='athlete_calendar', to='training_area.Athlete'),
),
migrations.AddField(
model_name='event',
name='coach',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='coach_calendar', to='training_area.Coach'),
),
]
|
flexible
|
{
"blob_id": "9555ed63b3906ec23c31839691a089aad9d96c63",
"index": 9917,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('training_area', '0002_event')]\n operations = [migrations.AddField(model_name='event', name='athlete',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.\n models.deletion.CASCADE, related_name='athlete_calendar', to=\n 'training_area.Athlete')), migrations.AddField(model_name='event',\n name='coach', field=models.ForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'coach_calendar', to='training_area.Coach'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('training_area', '0002_event')]\n operations = [migrations.AddField(model_name='event', name='athlete',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.\n models.deletion.CASCADE, related_name='athlete_calendar', to=\n 'training_area.Athlete')), migrations.AddField(model_name='event',\n name='coach', field=models.ForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'coach_calendar', to='training_area.Coach'))]\n",
"step-5": "# Generated by Django 2.1.7 on 2019-03-14 07:27\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('training_area', '0002_event'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='athlete',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='athlete_calendar', to='training_area.Athlete'),\n ),\n migrations.AddField(\n model_name='event',\n name='coach',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='coach_calendar', to='training_area.Coach'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
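Read back into model terms, this migration is what Django emits when two optional foreign keys are added to an existing Event model. A hedged reconstruction of the corresponding fields (the Event class body is inferred from the migration, not shown in the original):

from django.db import models

class Event(models.Model):
    # blank=True/null=True makes both relations optional, matching the migration
    athlete = models.ForeignKey('training_area.Athlete', blank=True, null=True,
                                on_delete=models.CASCADE,
                                related_name='athlete_calendar')
    coach = models.ForeignKey('training_area.Coach', blank=True, null=True,
                              on_delete=models.CASCADE,
                              related_name='coach_calendar')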
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .. import dataclass
<|reserved_special_token_1|>
from .. import dataclass # trigger the register in the dataclass package
|
flexible
|
{
"blob_id": "681750dbf489a6a32e9ef1d6f64d493cc252b272",
"index": 6386,
"step-1": "<mask token>\n",
"step-2": "from .. import dataclass\n",
"step-3": "from .. import dataclass # trigger the register in the dataclass package\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
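The comment on that import names a common pattern: importing a package runs its __init__, which registers classes as a side effect. A minimal sketch of the pattern; REGISTRY and register are illustrative names, not taken from the original package:

# registry.py
REGISTRY = {}

def register(name):
    def decorator(cls):
        REGISTRY[name] = cls  # filled in the moment the defining module is imported
        return cls
    return decorator

# Inside the dataclass package, classes would be decorated with @register(...),
# so a bare `from .. import dataclass` is enough to populate REGISTRY.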
# -*- coding: utf-8 -*-
import hashlib
import time
from datetime import datetime, timedelta

# Access a dict's keys like object attributes
class ObjectLikeDict(dict):
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            return ''

# Merge two dicts without mutating either argument
def merge_dict(dict1, dict2):
    return (lambda a, b: (lambda a_copy: a_copy.update(b) or a_copy)(a.copy()))(dict1 or {}, dict2 or {})

# Wrap a non-list value in a list
def to_list(obj):
    if isinstance(obj, list): return obj
    else: return [obj]

# Format a timestamp; defaults to the current time
def fmt_time(fmt='%Y-%m-%d %H:%M:%S', seconds=None):
    if not seconds: seconds = now()
    t = datetime.utcfromtimestamp(seconds)
    t = t + timedelta(hours=+8)  # shift to UTC+8
    return t.strftime(fmt)

# Current Unix timestamp (whole seconds)
def now():
    return int(time.time())

# MD5 hex digest of a string
def md5(s):
    m = hashlib.md5(s.encode('utf-8'))  # hashlib requires bytes in Python 3
    return m.hexdigest()

# Test (updated to Python 3 print syntax)
if __name__ == "__main__":
    dict1 = {'a': 1, 'b': 2}
    dict2 = {'c': 3, 'd': 4}
    print(merge_dict(dict1, dict2))

    print(md5(''))

    print(now())
    print(fmt_time())
|
normal
|
{
"blob_id": "f1c32fe7a29cddf4f881b46f4feab06390a76a44",
"index": 7516,
"step-1": "# -*- coding: utf-8 -*-\nimport hashlib\nimport time\nfrom datetime import datetime, timedelta\n\n# 像访问对象一样, 访问字典\nclass ObjectLikeDict(dict):\n def __getattr__(self, name):\n try:\n return self[name]\n except:\n return ''\n\n\n# 合并字典\ndef merge_dict(dict1, dict2):\n return (lambda a, b: (lambda a_copy: a_copy.update(b) or a_copy)(a.copy()))(dict1 or {}, dict2 or {})\n\n\n# 转数组\ndef to_list(obj):\n if isinstance(obj, list): return obj\n else: return [obj]\n\n\n# 格式化时间, 默认返回当前时间\ndef fmt_time(fmt='%Y-%m-%d %H:%M:%S', seconds=None):\n if not seconds: seconds = now()\n t = datetime.utcfromtimestamp(seconds)\n t = t + timedelta(hours=+8) # 时区\n return t.strftime(fmt)\n\n\n# 当前时间戳(精确到秒)\ndef now():\n return int(time.time())\n\n\n# 字符串MD5值\ndef md5(s):\n m = hashlib.md5(s)\n m.digest()\n return m.hexdigest()\n\n\n# Test\nif __name__ == \"__main__\":\n dict1 = {'a': 1, 'b': 2}\n dict2 = {'c': 3, 'd': 4}\n print merge_dict(dict1, dict2)\n\n print md5('')\n\n print now()\n print fmt_time()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
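The record above is Python 2 (bare print statements; md5() fed a str, which would need .encode() under Python 3), and its merge_dict one-liner hides copy-and-update inside nested lambdas. A minimal unrolled sketch of the same merge semantics may be easier to follow:

# Unrolled equivalent of the merge_dict lambda above: copy dict1, update the
# copy with dict2 (dict2 wins on key collisions), and return the copy.
# dict.update() returns None, which is why the original needs "or a_copy"
# to yield the merged dict as the expression's value.
def merge_dict_unrolled(dict1, dict2):
    a_copy = (dict1 or {}).copy()
    a_copy.update(dict2 or {})
    return a_copy


assert merge_dict_unrolled({'a': 1, 'b': 2}, {'b': 9, 'c': 3}) == {'a': 1, 'b': 9, 'c': 3}

# ObjectLikeDict from the record simply falls back to item lookup on attribute
# access: d = ObjectLikeDict({'name': 'x'}); d.name == 'x'; d.missing == ''.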
import sys
from PyQt5 import uic
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
class Instruction(QWidget):
def __init__(self):
super().__init__()
# Set UI file
uic.loadUi('../ui/instruction.ui', self)
# Connect handlers of buttons
self.OK_btn.clicked.connect(self.show_game)
self.set_background_instruction()
# Set background of the windows
def set_background_instruction(self):
img = QPixmap('../images/background_instruction.jpg')
self.background_instruction.setPixmap(img)
# Show window of the game
def show_game(self):
self.parent().show_game()
|
normal
|
{
"blob_id": "da30cea4cfb1ffccabe708fe15e5a633b06d299f",
"index": 2265,
"step-1": "<mask token>\n\n\nclass Instruction(QWidget):\n <mask token>\n\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Instruction(QWidget):\n\n def __init__(self):\n super().__init__()\n uic.loadUi('../ui/instruction.ui', self)\n self.OK_btn.clicked.connect(self.show_game)\n self.set_background_instruction()\n\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Instruction(QWidget):\n\n def __init__(self):\n super().__init__()\n uic.loadUi('../ui/instruction.ui', self)\n self.OK_btn.clicked.connect(self.show_game)\n self.set_background_instruction()\n\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n\n def show_game(self):\n self.parent().show_game()\n",
"step-4": "import sys\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QPixmap\n\n\nclass Instruction(QWidget):\n\n def __init__(self):\n super().__init__()\n uic.loadUi('../ui/instruction.ui', self)\n self.OK_btn.clicked.connect(self.show_game)\n self.set_background_instruction()\n\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n\n def show_game(self):\n self.parent().show_game()\n",
"step-5": "import sys\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QPixmap\n\n\nclass Instruction(QWidget):\n def __init__(self):\n super().__init__()\n\n # Set UI file\n uic.loadUi('../ui/instruction.ui', self)\n\n # Connect handlers of buttons\n self.OK_btn.clicked.connect(self.show_game)\n\n self.set_background_instruction()\n\n # Set background of the windows\n def set_background_instruction(self):\n img = QPixmap('../images/background_instruction.jpg')\n self.background_instruction.setPixmap(img)\n\n # Show window of the game\n def show_game(self):\n self.parent().show_game()\n ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
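The Instruction widget above hands control back through self.parent().show_game(), so something above it must expose that method. Nothing in the record shows that parent; the sketch below is a hypothetical wiring using a QStackedWidget-style switcher (it also assumes the record's ../ui and ../images assets exist on disk):

import sys
from PyQt5.QtWidgets import QApplication, QStackedWidget, QWidget


class Screens(QStackedWidget):
    """Hypothetical container owning the instruction and game screens."""

    def __init__(self):
        super().__init__()
        self.instruction = Instruction()  # Instruction from the record above
        self.game = QWidget()             # placeholder for the real game screen
        self.addWidget(self.instruction)  # addWidget() reparents the child,
        self.addWidget(self.game)         # so self.parent() resolves to Screens

    def show_game(self):
        # Target of Instruction's self.parent().show_game() call.
        self.setCurrentWidget(self.game)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    screens = Screens()
    screens.show()
    sys.exit(app.exec_())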
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
if __name__ == "__main__":
dataset = pd.read_csv('./dataset.csv')
X_train, X_test, y_train, y_test = train_test_split(
dataset["text"], dataset["label"], test_size=0.2, random_state=1, shuffle=True
)
baseline_pipeline = Pipeline(
[("vect", TfidfVectorizer(ngram_range=(1, 3))), ("svc", LinearSVC())]
)
baseline_pipeline.fit(X_train, y_train)
print(classification_report(y_test, baseline_pipeline.predict(X_test), digits=4))
|
normal
|
{
"blob_id": "f82c961fc1accd362b34a685bac4cc35d98f44ef",
"index": 6371,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n dataset = pd.read_csv('./dataset.csv')\n X_train, X_test, y_train, y_test = train_test_split(dataset['text'],\n dataset['label'], test_size=0.2, random_state=1, shuffle=True)\n baseline_pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1, \n 3))), ('svc', LinearSVC())])\n baseline_pipeline.fit(X_train, y_train)\n print(classification_report(y_test, baseline_pipeline.predict(X_test),\n digits=4))\n",
"step-3": "import pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nif __name__ == '__main__':\n dataset = pd.read_csv('./dataset.csv')\n X_train, X_test, y_train, y_test = train_test_split(dataset['text'],\n dataset['label'], test_size=0.2, random_state=1, shuffle=True)\n baseline_pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1, \n 3))), ('svc', LinearSVC())])\n baseline_pipeline.fit(X_train, y_train)\n print(classification_report(y_test, baseline_pipeline.predict(X_test),\n digits=4))\n",
"step-4": "import pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nif __name__ == \"__main__\":\n dataset = pd.read_csv('./dataset.csv')\n \n X_train, X_test, y_train, y_test = train_test_split(\n dataset[\"text\"], dataset[\"label\"], test_size=0.2, random_state=1, shuffle=True\n )\n\n baseline_pipeline = Pipeline(\n [(\"vect\", TfidfVectorizer(ngram_range=(1, 3))), (\"svc\", LinearSVC())]\n )\n\n baseline_pipeline.fit(X_train, y_train)\n print(classification_report(y_test, baseline_pipeline.predict(X_test), digits=4))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
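The record above wraps TF-IDF features (unigrams through trigrams) and a LinearSVC into a single Pipeline, so vectorizer and classifier are fit together on the training split. A short usage sketch for the fitted pipeline follows; the sample strings, and the idea that the labels are sentiment-like, are illustrative assumptions.

# Scoring new, unseen text with the fitted baseline_pipeline from above.
new_docs = [
    'this product exceeded my expectations',
    'completely broken on arrival',
]
for doc, label in zip(new_docs, baseline_pipeline.predict(new_docs)):
    print(label, '\t', doc)

# Because the TfidfVectorizer lives inside the Pipeline, predict() reuses the
# vocabulary and IDF weights learned during fit -- no manual transform step,
# and no risk of accidentally fitting the vectorizer on test data.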
<|reserved_special_token_0|>
class Pessoa:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def compara(self, outro_agente):
if self.distancia > outro_agente.distancia:
return True
else:
return False
def adiciona_sorte(self):
self.adiciona_distancia(self.luck)
def match(self, outro_agente):
self.partner = outro_agente
outro_agente.partner = self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pessoa:
def __init__(self, name, distancia):
self.id = name
self.distancia = distancia
self.members = dict()
self.ranking = list()
self.luck = random.randrange(1, 60)
self.partner = None
def adiciona_distancia(self, quantia):
self.distancia += quantia
def compara(self, outro_agente):
if self.distancia > outro_agente.distancia:
return True
else:
return False
def adiciona_sorte(self):
self.adiciona_distancia(self.luck)
def match(self, outro_agente):
self.partner = outro_agente
outro_agente.partner = self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pessoa:
def __init__(self, name, distancia):
self.id = name
self.distancia = distancia
self.members = dict()
self.ranking = list()
self.luck = random.randrange(1, 60)
self.partner = None
def adiciona_distancia(self, quantia):
self.distancia += quantia
def compara(self, outro_agente):
if self.distancia > outro_agente.distancia:
return True
else:
return False
def adiciona_sorte(self):
self.adiciona_distancia(self.luck)
def match(self, outro_agente):
self.partner = outro_agente
outro_agente.partner = self
if __name__ == '__main__':
tita = Pessoa('Tita', 10)
max = Pessoa('Max', 20)
fred = Pessoa('Fred', 0)
aveia = Pessoa('Aveia', 11)
print(aveia.compara(tita))
max.match(aveia)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import random
class Pessoa:
def __init__(self, name, distancia):
self.id = name
self.distancia = distancia
self.members = dict()
self.ranking = list()
self.luck = random.randrange(1, 60)
self.partner = None
def adiciona_distancia(self, quantia):
self.distancia += quantia
def compara(self, outro_agente):
if self.distancia > outro_agente.distancia:
return True
else:
return False
def adiciona_sorte(self):
self.adiciona_distancia(self.luck)
def match(self, outro_agente):
self.partner = outro_agente
outro_agente.partner = self
if __name__ == '__main__':
tita = Pessoa('Tita', 10)
max = Pessoa('Max', 20)
fred = Pessoa('Fred', 0)
aveia = Pessoa('Aveia', 11)
print(aveia.compara(tita))
max.match(aveia)
<|reserved_special_token_1|>
""" Class template
Ipea's Python for agent-based modeling course
"""
import random
# class name typically Capital letter
class Pessoa:
# Usually has an __init__ method called at the moment of instance creation
def __init__(self, name, distancia):
        # Stores the start-up parameters inside that instance
        # It is common to have a unique identification ID, or a name
self.id = name
self.distancia = distancia
        # May contain containers, data structures
self.members = dict()
self.ranking = list()
        # Or even a random value
self.luck = random.randrange(1, 60)
self.partner = None
def adiciona_distancia(self, quantia):
        # Modifies a stored value
self.distancia += quantia
def compara(self, outro_agente):
        # Can compare itself with another agent and access the other agent's methods
if self.distancia > outro_agente.distancia:
return True
else:
return False
def adiciona_sorte(self):
        # A method can call another method.
        # In this case, adding a random value to arg1!
self.adiciona_distancia(self.luck)
def match(self, outro_agente):
        # This method receives another agent (of this same class) and keeps/adds/saves the other agent as a variable,
        # inside this very agent
self.partner = outro_agente
outro_agente.partner = self
if __name__ == '__main__':
tita = Pessoa('Tita', 10)
max = Pessoa('Max', 20)
fred = Pessoa('Fred', 0)
aveia = Pessoa('Aveia', 11)
print(aveia.compara(tita))
max.match(aveia)
|
flexible
|
{
"blob_id": "d18bfdb606e4ba8a67acbb07cd9a3a6d2a0855e3",
"index": 6880,
"step-1": "<mask token>\n\n\nclass Pessoa:\n <mask token>\n <mask token>\n\n def compara(self, outro_agente):\n if self.distancia > outro_agente.distancia:\n return True\n else:\n return False\n\n def adiciona_sorte(self):\n self.adiciona_distancia(self.luck)\n\n def match(self, outro_agente):\n self.partner = outro_agente\n outro_agente.partner = self\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Pessoa:\n\n def __init__(self, name, distancia):\n self.id = name\n self.distancia = distancia\n self.members = dict()\n self.ranking = list()\n self.luck = random.randrange(1, 60)\n self.partner = None\n\n def adiciona_distancia(self, quantia):\n self.distancia += quantia\n\n def compara(self, outro_agente):\n if self.distancia > outro_agente.distancia:\n return True\n else:\n return False\n\n def adiciona_sorte(self):\n self.adiciona_distancia(self.luck)\n\n def match(self, outro_agente):\n self.partner = outro_agente\n outro_agente.partner = self\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Pessoa:\n\n def __init__(self, name, distancia):\n self.id = name\n self.distancia = distancia\n self.members = dict()\n self.ranking = list()\n self.luck = random.randrange(1, 60)\n self.partner = None\n\n def adiciona_distancia(self, quantia):\n self.distancia += quantia\n\n def compara(self, outro_agente):\n if self.distancia > outro_agente.distancia:\n return True\n else:\n return False\n\n def adiciona_sorte(self):\n self.adiciona_distancia(self.luck)\n\n def match(self, outro_agente):\n self.partner = outro_agente\n outro_agente.partner = self\n\n\nif __name__ == '__main__':\n tita = Pessoa('Tita', 10)\n max = Pessoa('Max', 20)\n fred = Pessoa('Fred', 0)\n aveia = Pessoa('Aveia', 11)\n print(aveia.compara(tita))\n max.match(aveia)\n",
"step-4": "<mask token>\nimport random\n\n\nclass Pessoa:\n\n def __init__(self, name, distancia):\n self.id = name\n self.distancia = distancia\n self.members = dict()\n self.ranking = list()\n self.luck = random.randrange(1, 60)\n self.partner = None\n\n def adiciona_distancia(self, quantia):\n self.distancia += quantia\n\n def compara(self, outro_agente):\n if self.distancia > outro_agente.distancia:\n return True\n else:\n return False\n\n def adiciona_sorte(self):\n self.adiciona_distancia(self.luck)\n\n def match(self, outro_agente):\n self.partner = outro_agente\n outro_agente.partner = self\n\n\nif __name__ == '__main__':\n tita = Pessoa('Tita', 10)\n max = Pessoa('Max', 20)\n fred = Pessoa('Fred', 0)\n aveia = Pessoa('Aveia', 11)\n print(aveia.compara(tita))\n max.match(aveia)\n",
"step-5": "\"\"\" Class template\n Ipea's Python for agent-based modeling course\n \"\"\"\n\nimport random\n\n\n# class name typically Capital letter\nclass Pessoa:\n # Usually has an __init__ method called at the moment of instance creation\n def __init__(self, name, distancia):\n # Armazena os parâmetros de início dentro daquela instância\n # É comum ter uma ID de identificação única, ou nome\n self.id = name\n self.distancia = distancia\n\n # Pode conter containers, data structures\n self.members = dict()\n self.ranking = list()\n\n # Ou ainda, um valor randômico\n self.luck = random.randrange(1, 60)\n self.partner = None\n\n def adiciona_distancia(self, quantia):\n # Modifica um valor armazenado\n self.distancia += quantia\n\n def compara(self, outro_agente):\n # Pode comparar-se com outro agente e acessar métodos do outro agente\n if self.distancia > outro_agente.distancia:\n return True\n else:\n return False\n\n def adiciona_sorte(self):\n # Um método pode acessar um outro método.\n # Nesse caso, adicionando um valor aleatório ao arg1!\n self.adiciona_distancia(self.luck)\n\n def match(self, outro_agente):\n # Esse método recebe outro agente (dessa mesma classe) e guarda/adiciona/salva o outro agente como uma variavel,\n # dentro deste próprio agente\n self.partner = outro_agente\n outro_agente.partner = self\n\n\nif __name__ == '__main__':\n tita = Pessoa('Tita', 10)\n max = Pessoa('Max', 20)\n fred = Pessoa('Fred', 0)\n aveia = Pessoa('Aveia', 11)\n print(aveia.compara(tita))\n max.match(aveia)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
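The agent template above documents three behaviors: mutating stored state, comparing two agents, and a symmetric match(). A short usage sketch, with illustrative agent names, makes those semantics concrete (note that match() links both partners in one call):

# Exercising the Pessoa agent template from the record (names are illustrative).
agente_a = Pessoa('Alice', 10)
agente_b = Pessoa('Bruno', 12)

agente_a.adiciona_sorte()          # adds this agent's random luck (1-59)
print(agente_a.distancia >= 11)    # True: at least the initial 10 plus 1

print(agente_b.compara(agente_a))  # True iff Bruno's distancia is strictly greater

agente_a.match(agente_b)           # one call links both sides
assert agente_a.partner is agente_b and agente_b.partner is agente_a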
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
stringe = test.strip()
list1 = stringe.split(' | ')
list2 = list1[0].split(' ')
kha = 0
for item in list2:
for c in list1[1]:
if c in item:
kha += 1
if kha == len(list1[1]):
print(item)
break
else:
print(False)
break
<|reserved_special_token_1|>
import sys
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
stringe = test.strip()
list1 = stringe.split(' | ')
list2 = list1[0].split(' ')
kha = 0
for item in list2:
for c in list1[1]:
if c in item:
kha += 1
if kha == len(list1[1]):
print(item)
break
else:
print(False)
break
<|reserved_special_token_1|>
import sys
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
stringe = test.strip()
list1 = stringe.split(" | ")
list2 = list1[0].split(" ")
kha = 0
for item in list2:
for c in list1[1]:
if c in item:
kha +=1
if kha == len(list1[1]):
print (item)
break
else:
print (False)
break
|
flexible
|
{
"blob_id": "def2721cd89501b1004d5d3f4f58df300616c1be",
"index": 2747,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(sys.argv[1], 'r') as test_cases:\n for test in test_cases:\n stringe = test.strip()\n list1 = stringe.split(' | ')\n list2 = list1[0].split(' ')\n kha = 0\n for item in list2:\n for c in list1[1]:\n if c in item:\n kha += 1\n if kha == len(list1[1]):\n print(item)\n break\n else:\n print(False)\n break\n",
"step-3": "import sys\nwith open(sys.argv[1], 'r') as test_cases:\n for test in test_cases:\n stringe = test.strip()\n list1 = stringe.split(' | ')\n list2 = list1[0].split(' ')\n kha = 0\n for item in list2:\n for c in list1[1]:\n if c in item:\n kha += 1\n if kha == len(list1[1]):\n print(item)\n break\n else:\n print(False)\n break\n",
"step-4": "\r\nimport sys\r\n\r\nwith open(sys.argv[1], 'r') as test_cases:\r\n for test in test_cases:\r\n stringe = test.strip()\r\n list1 = stringe.split(\" | \")\r\n list2 = list1[0].split(\" \")\r\n kha = 0\r\n for item in list2:\r\n for c in list1[1]:\r\n if c in item:\r\n kha +=1\r\n if kha == len(list1[1]):\r\n print (item)\r\n break\r\n else:\r\n print (False)\r\n break",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
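As written, the record's loop breaks out after examining only the first word (and kha is never reset between words), so any line whose first word lacks a key character prints False even when a later word matches. A sketch of the apparently intended behavior, printing the first word that contains every character of the key and False otherwise, assuming the "words | key" line format:

import sys

# First word containing every character of the key, else False (sketch).
with open(sys.argv[1], 'r') as test_cases:
    for test in test_cases:
        words, key = test.strip().split(' | ')
        for word in words.split(' '):
            if all(c in word for c in key):  # every key character present
                print(word)
                break
        else:                                # loop ended without a match
            print(False)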
<|reserved_special_token_0|>
class DimerGridSearch(BaseDriver_):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DimerGridSearch(BaseDriver_):
<|reserved_special_token_0|>
def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,
grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],
bonds_kw={}, comm=None):
raise Exception()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DimerGridSearch(BaseDriver_):
"""
Generates all dimer structures that should be considered for a grid search
    to find the best dimer arrangements. Grid search is performed over all
x,y,z positions for the COM and all orientations of the molecule. Only
dimers with physically relevant intermolecular distances are kept for the
user by providing maximum and minimum scaled vdW distances as max_sr and
    min_sr. Grid search can be performed using a single unique molecule or
two distinct molecules as input.
This method is parallelized using MPI. The user may launch as many MPI ranks
as they would like in order to reduce the computational burden for each
rank and speedup the time-to-solution.
Arguments
---------
min_sr: float
Minimum specific radius to use for dimer distance checks.
max_sr: float
Maximum specific radius multiplier that is allowed to be the minimum
distance between two dimers, thereby removing dimers formed from molecules
that are far away.
box: float,list
        Box size to search over for x,y,z positions. It assumes that the first
molecule of the dimer will be placed at 0,0,0. If the box size is a
float, a box will be placed at 0,0,0 and will extend by this value in
all directions. If a list is provided, the box will only extend by these
lengths in the x,y,z directions respectively, and due to symmetry, in
the -x,-y,-z directions. Default behavior is that the box size will
automatically be detected based on the size of the input molecules.
grid_spacing: float
Grid spacing to use for x,y,z position spacing
angle_spacing: float
Spacing of orientation angles to use for every x,y,z position. Assumed
to be in degrees.
cutoff: float
Distance between COM to neglect from dimer grid search.
tol: float
Tolerance used for the rmsd comparison. If the difference between the
structures is less than tol, then they are considered duplicates.
vdw: list
List of all vdw radii for all elements in periodic table
bonds_kw: dict
Keyword arguments for Structure.get_bonds method. This is used
for recognizing molecular connectivity.
inter_list: list
List of tuples of elements that should be considered for the distance
calculations. For example ("Li", "O"). Then, if the distance between the
Li in one molecule and the O in another molecule is outside the min_sr
to max_sr range then the dimer system will be removed. This is helpful
        to reduce the search space based on chemical intuition.
"""
def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,
grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],
bonds_kw={}, comm=None):
raise Exception()
<|reserved_special_token_1|>
from mcse.core.driver import BaseDriver_
class DimerGridSearch(BaseDriver_):
"""
Generates all dimer structures that should be considered for a grid search
    to find the best dimer arrangements. Grid search is performed over all
x,y,z positions for the COM and all orientations of the molecule. Only
dimers with physically relevant intermolecular distances are kept for the
user by providing maximum and minimum scaled vdW distances as max_sr and
    min_sr. Grid search can be performed using a single unique molecule or
two distinct molecules as input.
This method is parallelized using MPI. The user may launch as many MPI ranks
as they would like in order to reduce the computational burden for each
rank and speedup the time-to-solution.
Arguments
---------
min_sr: float
Minimum specific radius to use for dimer distance checks.
max_sr: float
Maximum specific radius multiplier that is allowed to be the minimum
distance between two dimers, thereby removing dimers formed from molecules
that are far away.
box: float,list
        Box size to search over for x,y,z positions. It assumes that the first
molecule of the dimer will be placed at 0,0,0. If the box size is a
float, a box will be placed at 0,0,0 and will extend by this value in
all directions. If a list is provided, the box will only extend by these
lengths in the x,y,z directions respectively, and due to symmetry, in
the -x,-y,-z directions. Default behavior is that the box size will
automatically be detected based on the size of the input molecules.
grid_spacing: float
Grid spacing to use for x,y,z position spacing
angle_spacing: float
Spacing of orientation angles to use for every x,y,z position. Assumed
to be in degrees.
cutoff: float
Distance between COM to neglect from dimer grid search.
tol: float
Tolerance used for the rmsd comparison. If the difference between the
structures is less than tol, then they are considered duplicates.
vdw: list
List of all vdw radii for all elements in periodic table
bonds_kw: dict
Keyword arguments for Structure.get_bonds method. This is used
for recognizing molecular connectivity.
inter_list: list
List of tuples of elements that should be considered for the distance
calculations. For example ("Li", "O"). Then, if the distance between the
Li in one molecule and the O in another molecule is outside the min_sr
to max_sr range then the dimer system will be removed. This is helpful
        to reduce the search space based on chemical intuition.
"""
def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,
grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],
bonds_kw={}, comm=None):
raise Exception()
<|reserved_special_token_1|>
from mcse.core.driver import BaseDriver_
class DimerGridSearch(BaseDriver_):
"""
Generates all dimer structures that should be considered for a grid search
    to find the best dimer arrangements. Grid search is performed over all
x,y,z positions for the COM and all orientations of the molecule. Only
dimers with physically relevant intermolecular distances are kept for the
user by providing maximum and minimum scaled vdW distances as max_sr and
    min_sr. Grid search can be performed using a single unique molecule or
two distinct molecules as input.
This method is parallelized using MPI. The user may launch as many MPI ranks
as they would like in order to reduce the computational burden for each
rank and speedup the time-to-solution.
Arguments
---------
min_sr: float
Minimum specific radius to use for dimer distance checks.
max_sr: float
Maximum specific radius multiplier that is allowed to be the minimum
distance between two dimers, thereby removing dimers formed from molecules
that are far away.
box: float,list
        Box size to search over for x,y,z positions. It assumes that the first
molecule of the dimer will be placed at 0,0,0. If the box size is a
float, a box will be placed at 0,0,0 and will extend by this value in
all directions. If a list is provided, the box will only extend by these
lengths in the x,y,z directions respectively, and due to symmetry, in
the -x,-y,-z directions. Default behavior is that the box size will
automatically be detected based on the size of the input molecules.
grid_spacing: float
Grid spacing to use for x,y,z position spacing
angle_spacing: float
Spacing of orientation angles to use for every x,y,z position. Assumed
to be in degrees.
cutoff: float
Distance between COM to neglect from dimer grid search.
tol: float
Tolerance used for the rmsd comparison. If the difference between the
structures is less than tol, then they are considered duplicates.
vdw: list
List of all vdw radii for all elements in periodic table
bonds_kw: dict
Keyword arguments for Structure.get_bonds method. This is used
for recognizing molecular connectivity.
inter_list: list
List of tuples of elements that should be considered for the distance
calculations. For example ("Li", "O"). Then, if the distance between the
Li in one molecule and the O in another molecule is outside the min_sr
to max_sr range then the dimer system will be removed. This is helpful
        to reduce the search space based on chemical intuition.
"""
def __init__(self,
folder="",
min_sr=0.75,
max_sr=1.30,
box=-1,
grid_spacing=2.5,
angle_spacing=30,
inter_list=[],
tol=0.1,
vdw=[],
bonds_kw={},
comm=None):
raise Exception()
|
flexible
|
{
"blob_id": "9db4bca3e907d70d9696f98506efb6d6042b5723",
"index": 6710,
"step-1": "<mask token>\n\n\nclass DimerGridSearch(BaseDriver_):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DimerGridSearch(BaseDriver_):\n <mask token>\n\n def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,\n grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],\n bonds_kw={}, comm=None):\n raise Exception()\n",
"step-3": "<mask token>\n\n\nclass DimerGridSearch(BaseDriver_):\n \"\"\"\n Generates all dimer structures that should be considered for a grid search\n to find the best dimer arangements. Grid search is performed over all \n x,y,z positions for the COM and all orientations of the molecule. Only\n dimers with physically relevant intermolecular distances are kept for the \n user by providing maximum and minimum scaled vdW distances as max_sr and \n min_sr. Grid search can be performed using a a single unqiue molecule or \n two distinct molecules as input. \n \n This method is parallelized using MPI. The user may launch as many MPI ranks\n as they would like in order to reduce the computational burden for each \n rank and speedup the time-to-solution. \n \n Arguments\n ---------\n min_sr: float\n Minimum specific radius to use for dimer distance checks.\n max_sr: float\n Maximum specific radius multiplier that is allowed to be the minimum \n distance between two dimers, thereby removing dimers formed from molecules\n that are far away. \n box: float,list\n Box size to search over for x,y,z positions. It's assumes that first \n molecule of the dimer will be placed at 0,0,0. If the box size is a \n float, a box will be placed at 0,0,0 and will extend by this value in\n all directions. If a list is provided, the box will only extend by these\n lengths in the x,y,z directions respectively, and due to symmetry, in \n the -x,-y,-z directions. Default behavior is that the box size will\n automatically be detected based on the size of the input molecules.\n grid_spacing: float\n Grid spacing to use for x,y,z position spacing\n angle_spacing: float\n Spacing of orientation angles to use for every x,y,z position. Assumed\n to be in degrees. \n cutoff: float\n Distance between COM to neglect from dimer grid search. \n tol: float\n Tolerance used for the rmsd comparison. If the difference between the\n structures is less than tol, then they are considered duplicates. \n vdw: list\n List of all vdw radii for all elements in periodic table\n bonds_kw: dict\n Keyword arguments for Structure.get_bonds method. This is used\n for recognizing molecular connectivity. \n inter_list: list\n List of tuples of elements that should be considered for the distance\n calculations. For example (\"Li\", \"O\"). Then, if the distance between the\n Li in one molecule and the O in another molecule is outside the min_sr\n to max_sr range then the dimer system will be removed. This is helpful\n to reduce the search space based on chemical intution.\n \n \"\"\"\n\n def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,\n grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],\n bonds_kw={}, comm=None):\n raise Exception()\n",
"step-4": "from mcse.core.driver import BaseDriver_\n\n\nclass DimerGridSearch(BaseDriver_):\n \"\"\"\n Generates all dimer structures that should be considered for a grid search\n to find the best dimer arangements. Grid search is performed over all \n x,y,z positions for the COM and all orientations of the molecule. Only\n dimers with physically relevant intermolecular distances are kept for the \n user by providing maximum and minimum scaled vdW distances as max_sr and \n min_sr. Grid search can be performed using a a single unqiue molecule or \n two distinct molecules as input. \n \n This method is parallelized using MPI. The user may launch as many MPI ranks\n as they would like in order to reduce the computational burden for each \n rank and speedup the time-to-solution. \n \n Arguments\n ---------\n min_sr: float\n Minimum specific radius to use for dimer distance checks.\n max_sr: float\n Maximum specific radius multiplier that is allowed to be the minimum \n distance between two dimers, thereby removing dimers formed from molecules\n that are far away. \n box: float,list\n Box size to search over for x,y,z positions. It's assumes that first \n molecule of the dimer will be placed at 0,0,0. If the box size is a \n float, a box will be placed at 0,0,0 and will extend by this value in\n all directions. If a list is provided, the box will only extend by these\n lengths in the x,y,z directions respectively, and due to symmetry, in \n the -x,-y,-z directions. Default behavior is that the box size will\n automatically be detected based on the size of the input molecules.\n grid_spacing: float\n Grid spacing to use for x,y,z position spacing\n angle_spacing: float\n Spacing of orientation angles to use for every x,y,z position. Assumed\n to be in degrees. \n cutoff: float\n Distance between COM to neglect from dimer grid search. \n tol: float\n Tolerance used for the rmsd comparison. If the difference between the\n structures is less than tol, then they are considered duplicates. \n vdw: list\n List of all vdw radii for all elements in periodic table\n bonds_kw: dict\n Keyword arguments for Structure.get_bonds method. This is used\n for recognizing molecular connectivity. \n inter_list: list\n List of tuples of elements that should be considered for the distance\n calculations. For example (\"Li\", \"O\"). Then, if the distance between the\n Li in one molecule and the O in another molecule is outside the min_sr\n to max_sr range then the dimer system will be removed. This is helpful\n to reduce the search space based on chemical intution.\n \n \"\"\"\n\n def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,\n grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],\n bonds_kw={}, comm=None):\n raise Exception()\n",
"step-5": "\n\nfrom mcse.core.driver import BaseDriver_\n\n\n\nclass DimerGridSearch(BaseDriver_):\n \"\"\"\n Generates all dimer structures that should be considered for a grid search\n to find the best dimer arangements. Grid search is performed over all \n x,y,z positions for the COM and all orientations of the molecule. Only\n dimers with physically relevant intermolecular distances are kept for the \n user by providing maximum and minimum scaled vdW distances as max_sr and \n min_sr. Grid search can be performed using a a single unqiue molecule or \n two distinct molecules as input. \n \n This method is parallelized using MPI. The user may launch as many MPI ranks\n as they would like in order to reduce the computational burden for each \n rank and speedup the time-to-solution. \n \n Arguments\n ---------\n min_sr: float\n Minimum specific radius to use for dimer distance checks.\n max_sr: float\n Maximum specific radius multiplier that is allowed to be the minimum \n distance between two dimers, thereby removing dimers formed from molecules\n that are far away. \n box: float,list\n Box size to search over for x,y,z positions. It's assumes that first \n molecule of the dimer will be placed at 0,0,0. If the box size is a \n float, a box will be placed at 0,0,0 and will extend by this value in\n all directions. If a list is provided, the box will only extend by these\n lengths in the x,y,z directions respectively, and due to symmetry, in \n the -x,-y,-z directions. Default behavior is that the box size will\n automatically be detected based on the size of the input molecules.\n grid_spacing: float\n Grid spacing to use for x,y,z position spacing\n angle_spacing: float\n Spacing of orientation angles to use for every x,y,z position. Assumed\n to be in degrees. \n cutoff: float\n Distance between COM to neglect from dimer grid search. \n tol: float\n Tolerance used for the rmsd comparison. If the difference between the\n structures is less than tol, then they are considered duplicates. \n vdw: list\n List of all vdw radii for all elements in periodic table\n bonds_kw: dict\n Keyword arguments for Structure.get_bonds method. This is used\n for recognizing molecular connectivity. \n inter_list: list\n List of tuples of elements that should be considered for the distance\n calculations. For example (\"Li\", \"O\"). Then, if the distance between the\n Li in one molecule and the O in another molecule is outside the min_sr\n to max_sr range then the dimer system will be removed. This is helpful\n to reduce the search space based on chemical intution.\n \n \"\"\"\n def __init__(self, \n folder=\"\",\n min_sr=0.75,\n max_sr=1.30,\n box=-1, \n grid_spacing=2.5, \n angle_spacing=30, \n inter_list=[],\n tol=0.1,\n vdw=[],\n bonds_kw={},\n comm=None):\n raise Exception()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
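The DimerGridSearch docstring above specifies the search space (an x,y,z COM grid crossed with an orientation grid, filtered by scaled vdW contact distances), but its __init__ is a stub that raises. A minimal numpy sketch of that candidate generation, under stated assumptions (a made-up two-atom molecule, illustrative vdW radii, rotation about z only, nothing from the mcse API), is:

import numpy as np

# Illustrative inputs only: a fixed two-atom "molecule" at the origin,
# made-up vdW radii, and docstring-style default parameters.
mol = np.array([[0.0, 0.0, 0.0], [1.4, 0.0, 0.0]])
vdw = np.array([1.7, 1.7])
min_sr, max_sr = 0.75, 1.30
box, grid_spacing, angle_spacing = 6.0, 2.5, 30.0

# COM grid: a symmetric box around the fixed molecule at the origin.
axis = np.arange(-box, box + 1e-9, grid_spacing)
com_grid = np.array(np.meshgrid(axis, axis, axis)).T.reshape(-1, 3)

# Orientation grid about z only, for brevity; the driver scans all rotations.
angles = np.deg2rad(np.arange(0.0, 360.0, angle_spacing))


def keep(moved):
    """Keep a dimer iff its closest contact falls inside the sr window."""
    d = np.linalg.norm(mol[:, None, :] - moved[None, :, :], axis=-1)
    sr = d / (vdw[:, None] + vdw[None, :])  # distance scaled by vdW radii sum
    return min_sr <= sr.min() <= max_sr


candidates = []
for com in com_grid:
    for a in angles:
        rot = np.array([[np.cos(a), -np.sin(a), 0.0],
                        [np.sin(a), np.cos(a), 0.0],
                        [0.0, 0.0, 1.0]])
        if keep(mol @ rot.T + com):
            candidates.append((com, a))

print(len(candidates), 'of', len(com_grid) * len(angles), 'grid dimers kept')

The sr window implements the docstring's rule: the closest intermolecular contact, divided by the sum of the two atoms' vdW radii, must land between min_sr and max_sr, which discards both clashing and far-apart dimers.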
import json
startTime = ""
endTime = ""
controller = 0
for files in range(30):
file = open("NewResults" + str(files+1) + ".data")
for line in file:
if line != "\n":
j = json.loads(line)
if controller == 0:
startTime = j['metrics'][0]['startTime']
helper = startTime.split(" ")
hour = helper[1].split(":")[0]
minute = helper[1].split(":")[1]
second = helper[1].split(":")[2]
print("startTime: " + hour + " : " + minute + " : " + second)
elif controller == 14:
endTime = j['metrics'][0]['startTime']
helper = endTime.split(" ")
hour = helper[1].split(":")[0]
minute = helper[1].split(":")[1]
second = helper[1].split(":")[2]
print("endTime: " + hour + " : " + minute + " : " + second)
controller = 0
break
controller += 1
file = open("request-file-burst-1.data", "r")
for line in file:
data = line.split(" ")
grossTime = data[0].split(":")
hour = grossTime[0].split("[")[1]
minute = grossTime[1]
second = grossTime[2].split("]")[0]
print(hour + " : " + minute + " : " + second)
break
|
normal
|
{
"blob_id": "03284f20e614a5f8f5c21939acf49490d6ffd3a3",
"index": 7812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor files in range(30):\n file = open('NewResults' + str(files + 1) + '.data')\n for line in file:\n if line != '\\n':\n j = json.loads(line)\n if controller == 0:\n startTime = j['metrics'][0]['startTime']\n helper = startTime.split(' ')\n hour = helper[1].split(':')[0]\n minute = helper[1].split(':')[1]\n second = helper[1].split(':')[2]\n print('startTime: ' + hour + ' : ' + minute + ' : ' + second)\n elif controller == 14:\n endTime = j['metrics'][0]['startTime']\n helper = endTime.split(' ')\n hour = helper[1].split(':')[0]\n minute = helper[1].split(':')[1]\n second = helper[1].split(':')[2]\n print('endTime: ' + hour + ' : ' + minute + ' : ' + second)\n controller = 0\n break\n controller += 1\n<mask token>\nfor line in file:\n data = line.split(' ')\n grossTime = data[0].split(':')\n hour = grossTime[0].split('[')[1]\n minute = grossTime[1]\n second = grossTime[2].split(']')[0]\n print(hour + ' : ' + minute + ' : ' + second)\n break\n",
"step-3": "<mask token>\nstartTime = ''\nendTime = ''\ncontroller = 0\nfor files in range(30):\n file = open('NewResults' + str(files + 1) + '.data')\n for line in file:\n if line != '\\n':\n j = json.loads(line)\n if controller == 0:\n startTime = j['metrics'][0]['startTime']\n helper = startTime.split(' ')\n hour = helper[1].split(':')[0]\n minute = helper[1].split(':')[1]\n second = helper[1].split(':')[2]\n print('startTime: ' + hour + ' : ' + minute + ' : ' + second)\n elif controller == 14:\n endTime = j['metrics'][0]['startTime']\n helper = endTime.split(' ')\n hour = helper[1].split(':')[0]\n minute = helper[1].split(':')[1]\n second = helper[1].split(':')[2]\n print('endTime: ' + hour + ' : ' + minute + ' : ' + second)\n controller = 0\n break\n controller += 1\nfile = open('request-file-burst-1.data', 'r')\nfor line in file:\n data = line.split(' ')\n grossTime = data[0].split(':')\n hour = grossTime[0].split('[')[1]\n minute = grossTime[1]\n second = grossTime[2].split(']')[0]\n print(hour + ' : ' + minute + ' : ' + second)\n break\n",
"step-4": "import json\nstartTime = ''\nendTime = ''\ncontroller = 0\nfor files in range(30):\n file = open('NewResults' + str(files + 1) + '.data')\n for line in file:\n if line != '\\n':\n j = json.loads(line)\n if controller == 0:\n startTime = j['metrics'][0]['startTime']\n helper = startTime.split(' ')\n hour = helper[1].split(':')[0]\n minute = helper[1].split(':')[1]\n second = helper[1].split(':')[2]\n print('startTime: ' + hour + ' : ' + minute + ' : ' + second)\n elif controller == 14:\n endTime = j['metrics'][0]['startTime']\n helper = endTime.split(' ')\n hour = helper[1].split(':')[0]\n minute = helper[1].split(':')[1]\n second = helper[1].split(':')[2]\n print('endTime: ' + hour + ' : ' + minute + ' : ' + second)\n controller = 0\n break\n controller += 1\nfile = open('request-file-burst-1.data', 'r')\nfor line in file:\n data = line.split(' ')\n grossTime = data[0].split(':')\n hour = grossTime[0].split('[')[1]\n minute = grossTime[1]\n second = grossTime[2].split(']')[0]\n print(hour + ' : ' + minute + ' : ' + second)\n break\n",
"step-5": "import json\n\nstartTime = \"\"\nendTime = \"\"\n\ncontroller = 0\nfor files in range(30):\n\tfile = open(\"NewResults\" + str(files+1) + \".data\")\n\tfor line in file:\n\t\tif line != \"\\n\":\n\t\t\tj = json.loads(line)\n\t\t\tif controller == 0:\n\t\t\t\tstartTime = j['metrics'][0]['startTime']\n\t\t\t\thelper = startTime.split(\" \")\n\t\t\t\thour = helper[1].split(\":\")[0]\n\t\t\t\tminute = helper[1].split(\":\")[1]\n\t\t\t\tsecond = helper[1].split(\":\")[2]\n\t\t\t\tprint(\"startTime: \" + hour + \" : \" + minute + \" : \" + second)\n\t\t\telif controller == 14:\n\t\t\t\tendTime = j['metrics'][0]['startTime']\n\t\t\t\thelper = endTime.split(\" \")\n\t\t\t\thour = helper[1].split(\":\")[0]\n\t\t\t\tminute = helper[1].split(\":\")[1]\n\t\t\t\tsecond = helper[1].split(\":\")[2]\n\t\t\t\tprint(\"endTime: \" + hour + \" : \" + minute + \" : \" + second)\n\t\t\t\tcontroller = 0\n\t\t\t\tbreak\n\t\t\tcontroller += 1\n\nfile = open(\"request-file-burst-1.data\", \"r\")\nfor line in file:\n\tdata = line.split(\" \")\n\tgrossTime = data[0].split(\":\")\n\thour = grossTime[0].split(\"[\")[1]\n\tminute = grossTime[1]\n\tsecond = grossTime[2].split(\"]\")[0]\n\tprint(hour + \" : \" + minute + \" : \" + second)\n\tbreak\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
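The record above recovers hour, minute and second by hand-splitting strings, which breaks silently if the layout shifts. A sketch of the same parsing with datetime.strptime, assuming the two timestamp shapes the splits imply ("YYYY-MM-DD HH:MM:SS" inside the JSON metrics, and a bracketed "[HH:MM:SS]" prefix in the request file):

from datetime import datetime

# JSON metrics side: 'startTime' strings like '2019-03-14 07:27:00'
# (assumed shape, inferred from the date/space/colon splits above).
start = datetime.strptime('2019-03-14 07:27:00', '%Y-%m-%d %H:%M:%S')
print('startTime:', start.hour, ':', start.minute, ':', start.second)

# Request-file side: lines prefixed with a bracketed clock time (illustrative).
line = '[07:27:13] some request payload'
stamp = datetime.strptime(line.split(' ')[0], '[%H:%M:%S]')
print(stamp.hour, ':', stamp.minute, ':', stamp.second)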
import pygame
class MenuManager():
def __init__(self, manager):
print "Menu manager created. Continue? [y/n]"
self.manager = manager
self.paused = False
self.intro_done = False
self.menus = []
self.menus.append(Pause_menu(self))
self.menus.append(Start_screen(self))
def get_paused(self):
return self.paused
def set_paused(self, pause):
self.paused = pause
def set_intro_done(self, startup):
self.intro_done = startup
def get_intro_done(self):
return self.intro_done
def set_active(self, menu_index):
self.menus[menu_index].set_active()
def unset_active(self, menu_index):
self.menus[menu_index].unset_active()
def exit_game(self):
self.manager.exit_game()
def pass_event(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
self.unset_active(1)
self.paused = not self.paused
def draw(self, screen):
if self.paused and self.menus[1].is_active() == False:
self.set_active(0)
else:
self.unset_active(0)
for menu in self.menus:
if menu.is_active():
menu.draw(screen)
class Button():
def __init__(self, pos, size, color, font, font_size, font_color, image=None, text=None):
self.pos = pos
self.size = size
self.rect = pygame.Rect(self.pos, self.size)
self.color = color
self.d_color = 40
if self.color[0]>235 or self.color[1]>235 or self.color[2]>235:
self.hover_color = (self.color[0]-self.d_color, self.color[1]-self.d_color, self.color[2]-self.d_color)
else:
self.hover_color = (self.color[0]+self.d_color, self.color[1]+self.d_color, self.color[2]+self.d_color)
self.font = pygame.font.SysFont(font, font_size)
self.font_color = font_color
self.font.set_bold(True)
if image != None: self.image = pygame.image.load("Sprites/"+image+".png")
else: self.image = None
if text != None:
self.text = self.font.render(text, True, self.font_color)
def draw(self, screen):
draw_pos = (screen.get_width()/2+self.pos[0]-self.size[0]/2, screen.get_height()/2+self.pos[1])
if self.image != None:
screen.blit(self.image, draw_pos)
else:
self.rect = pygame.Rect(draw_pos, self.size)
if self.rect.collidepoint(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]):
draw_color = self.hover_color
else:
draw_color = self.color
pygame.draw.rect(screen, draw_color, self.rect)
screen.blit(self.text, (self.rect.x+self.rect.w/2-self.text.get_width()/2, self.rect.y+self.rect.h/2-self.text.get_height()/2))
pygame.draw.rect(screen, (0,0,0), self.rect, 1)
def is_clicked(self):
if self.rect.collidepoint(pygame.mouse.get_pos()):
return True
else:
return False
class Pause_menu():
def __init__(self, manager):
self.manager = manager
self.buttons = []
self.buttons.append(Button((-100, 30), (100,50), (255,255,255), "Arial", 20, (255,0,0), text="Continue"))
self.buttons.append(Button((100, 30), (120,50), (255,255,255), "Arial", 20, (255,0,0), text="Exit game"))
self.active = False
def draw(self, screen):
for button in self.buttons:
self.check_clicked()
button.draw(screen)
def is_active(self):
return self.active
def set_active(self):
self.active = True
def unset_active(self):
self.active = False
def check_clicked(self):
for button_i in range(len(self.buttons)):
if pygame.mouse.get_pressed()[0] == True and self.buttons[button_i].is_clicked():
if button_i == 0:
self.manager.set_paused(False)
self.manager.unset_active(1)
elif button_i == 1:
print "Exit button pressed. Goodbye"
self.manager.exit_game()
class Start_screen():
def __init__(self, manager):
self.manager = manager
self.active = False
self.image = pygame.image.load("Files/Start_screen.png")
self.buttons = []
self.buttons.append(Button((-100, 150), (130,50), (255,255,255), "Arial", 20, (255,0,0), text="Start"))
self.buttons.append(Button((100, 150), (190,50), (255,255,255), "Arial", 20, (255,0,0), text="Exit game [ESC]"))
def draw(self, screen):
draw_pos = (screen.get_width()/2-self.image.get_width()/2, 20)
self.check_clicked()
for button in self.buttons:
button.draw(screen)
screen.blit(self.image, draw_pos)
def is_active(self):
return self.active
def set_active(self):
self.active = True
def unset_active(self):
self.active = False
def check_clicked(self):
for button_i in range(len(self.buttons)):
if pygame.mouse.get_pressed()[0] == True:
if self.buttons[button_i].is_clicked():
if button_i == 0:
self.manager.set_intro_done(True)
self.manager.unset_active(1)
self.manager.manager.get_universe().set_can_shoot(True)
elif button_i == 1:
print "Exit button pressed. Goodbye"
self.manager.exit_game()
|
normal
|
{
"blob_id": "f0ac2e66cc7fe9730c77a8feb77a74e26986a3f8",
"index": 1380,
"step-1": "import pygame\r\n\r\nclass MenuManager():\r\n def __init__(self, manager):\r\n print \"Menu manager created. Continue? [y/n]\"\r\n self.manager = manager\r\n self.paused = False\r\n self.intro_done = False\r\n self.menus = []\r\n self.menus.append(Pause_menu(self))\r\n self.menus.append(Start_screen(self))\r\n\r\n def get_paused(self):\r\n return self.paused\r\n\r\n def set_paused(self, pause):\r\n self.paused = pause\r\n\r\n def set_intro_done(self, startup):\r\n self.intro_done = startup\r\n\r\n def get_intro_done(self):\r\n return self.intro_done\r\n\r\n def set_active(self, menu_index):\r\n self.menus[menu_index].set_active()\r\n\r\n def unset_active(self, menu_index):\r\n self.menus[menu_index].unset_active()\r\n\r\n def exit_game(self):\r\n self.manager.exit_game()\r\n\r\n def pass_event(self, event):\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_p:\r\n self.unset_active(1)\r\n self.paused = not self.paused\r\n\r\n def draw(self, screen):\r\n if self.paused and self.menus[1].is_active() == False:\r\n self.set_active(0)\r\n else:\r\n self.unset_active(0)\r\n\r\n for menu in self.menus:\r\n if menu.is_active():\r\n menu.draw(screen)\r\n\r\n\r\nclass Button():\r\n def __init__(self, pos, size, color, font, font_size, font_color, image=None, text=None):\r\n self.pos = pos\r\n self.size = size\r\n self.rect = pygame.Rect(self.pos, self.size)\r\n self.color = color\r\n self.d_color = 40\r\n if self.color[0]>235 or self.color[1]>235 or self.color[2]>235:\r\n self.hover_color = (self.color[0]-self.d_color, self.color[1]-self.d_color, self.color[2]-self.d_color)\r\n else:\r\n self.hover_color = (self.color[0]+self.d_color, self.color[1]+self.d_color, self.color[2]+self.d_color)\r\n self.font = pygame.font.SysFont(font, font_size)\r\n self.font_color = font_color\r\n self.font.set_bold(True)\r\n if image != None: self.image = pygame.image.load(\"Sprites/\"+image+\".png\")\r\n else: self.image = None\r\n if text != None:\r\n self.text = self.font.render(text, True, self.font_color)\r\n\r\n def draw(self, screen):\r\n draw_pos = (screen.get_width()/2+self.pos[0]-self.size[0]/2, screen.get_height()/2+self.pos[1])\r\n if self.image != None:\r\n screen.blit(self.image, draw_pos)\r\n else:\r\n self.rect = pygame.Rect(draw_pos, self.size)\r\n if self.rect.collidepoint(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]):\r\n draw_color = self.hover_color\r\n else:\r\n draw_color = self.color\r\n pygame.draw.rect(screen, draw_color, self.rect)\r\n screen.blit(self.text, (self.rect.x+self.rect.w/2-self.text.get_width()/2, self.rect.y+self.rect.h/2-self.text.get_height()/2))\r\n pygame.draw.rect(screen, (0,0,0), self.rect, 1)\r\n\r\n def is_clicked(self):\r\n if self.rect.collidepoint(pygame.mouse.get_pos()):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\nclass Pause_menu():\r\n def __init__(self, manager):\r\n self.manager = manager\r\n self.buttons = []\r\n self.buttons.append(Button((-100, 30), (100,50), (255,255,255), \"Arial\", 20, (255,0,0), text=\"Continue\"))\r\n self.buttons.append(Button((100, 30), (120,50), (255,255,255), \"Arial\", 20, (255,0,0), text=\"Exit game\"))\r\n self.active = False\r\n\r\n def draw(self, screen):\r\n for button in self.buttons:\r\n self.check_clicked()\r\n button.draw(screen)\r\n\r\n def is_active(self):\r\n return self.active\r\n\r\n def set_active(self):\r\n self.active = True\r\n\r\n def unset_active(self):\r\n self.active = False\r\n\r\n def check_clicked(self):\r\n for button_i in range(len(self.buttons)):\r\n if 
pygame.mouse.get_pressed()[0] == True and self.buttons[button_i].is_clicked():\r\n if button_i == 0:\r\n self.manager.set_paused(False)\r\n self.manager.unset_active(1)\r\n elif button_i == 1:\r\n print \"Exit button pressed. Goodbye\"\r\n self.manager.exit_game()\r\n\r\n\r\nclass Start_screen():\r\n def __init__(self, manager):\r\n self.manager = manager\r\n self.active = False\r\n self.image = pygame.image.load(\"Files/Start_screen.png\")\r\n self.buttons = []\r\n self.buttons.append(Button((-100, 150), (130,50), (255,255,255), \"Arial\", 20, (255,0,0), text=\"Start\"))\r\n self.buttons.append(Button((100, 150), (190,50), (255,255,255), \"Arial\", 20, (255,0,0), text=\"Exit game [ESC]\"))\r\n\r\n def draw(self, screen):\r\n draw_pos = (screen.get_width()/2-self.image.get_width()/2, 20)\r\n self.check_clicked()\r\n for button in self.buttons:\r\n button.draw(screen)\r\n screen.blit(self.image, draw_pos)\r\n\r\n def is_active(self):\r\n return self.active\r\n\r\n def set_active(self):\r\n self.active = True\r\n\r\n def unset_active(self):\r\n self.active = False\r\n\r\n def check_clicked(self):\r\n for button_i in range(len(self.buttons)):\r\n if pygame.mouse.get_pressed()[0] == True: \r\n if self.buttons[button_i].is_clicked():\r\n if button_i == 0:\r\n self.manager.set_intro_done(True)\r\n self.manager.unset_active(1)\r\n self.manager.manager.get_universe().set_can_shoot(True)\r\n elif button_i == 1:\r\n print \"Exit button pressed. Goodbye\"\r\n self.manager.exit_game()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
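Nothing in the record above shows the per-frame loop that feeds these classes, so the harness below is hypothetical: one text-only Button drawn each frame, with clicks reported. (The record's menu classes are Python 2; the Button class itself is version-neutral, so this sketch uses Python 3.)

import pygame

pygame.init()
screen = pygame.display.set_mode((400, 300))
clock = pygame.time.Clock()
# Button signature from the record: pos, size, color, font, font_size,
# font_color, image=None, text=None; pos is relative to the screen centre.
button = Button((0, 0), (120, 50), (255, 255, 255), 'Arial', 20, (255, 0, 0),
                text='Click me')

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN and button.is_clicked():
            print('Button pressed')
    screen.fill((30, 30, 30))
    button.draw(screen)       # updates button.rect, then draws box and label
    pygame.display.flip()
    clock.tick(60)
pygame.quit()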
""" Utility functions and classes for SRP
Context : SRP
Module : Statsistics
Version : 1.0.0
Author : Stefano Covino
Date : 04/04/2013
E-mail : [email protected]
URL: : http://www.merate.mi.astro.it/utenti/covino
Usage : to be imported
Remarks : inputs are 1D vectors to be cross-correlated. Optionally you can
           give a vector of x-axis units. It returns the offset of the
           cross-correlation peak in x-axis units.
History : (04/04/2013) First version.
"""
import numpy
def XCorr_1D (data, refdata, xdata=None):
if data.ndim == 1 and refdata.ndim == 1:
ycorr = numpy.correlate(data, refdata, mode="full")
xcorr = numpy.arange(ycorr.size)
lags = xcorr - (data.size-1)
        if xdata is None:
distPerLag = 1.
elif xdata.ndim == 1:
distPerLag = (xdata[-1] - xdata[0])/float(xdata.size)
else:
return None
#
offsets = -lags*distPerLag
#
mx = ycorr.argmax()
ox = offsets[mx]
return ox
else:
return None
|
normal
|
{
"blob_id": "c62ffcaa9095d772e51be086be349d200346bc22",
"index": 9662,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef XCorr_1D(data, refdata, xdata=None):\n if data.ndim == 1 and refdata.ndim == 1:\n ycorr = numpy.correlate(data, refdata, mode='full')\n xcorr = numpy.arange(ycorr.size)\n lags = xcorr - (data.size - 1)\n if xdata == None:\n distPerLag = 1.0\n elif xdata.ndim == 1:\n distPerLag = (xdata[-1] - xdata[0]) / float(xdata.size)\n else:\n return None\n offsets = -lags * distPerLag\n mx = ycorr.argmax()\n ox = offsets[mx]\n return ox\n else:\n return None\n",
"step-3": "<mask token>\nimport numpy\n\n\ndef XCorr_1D(data, refdata, xdata=None):\n if data.ndim == 1 and refdata.ndim == 1:\n ycorr = numpy.correlate(data, refdata, mode='full')\n xcorr = numpy.arange(ycorr.size)\n lags = xcorr - (data.size - 1)\n if xdata == None:\n distPerLag = 1.0\n elif xdata.ndim == 1:\n distPerLag = (xdata[-1] - xdata[0]) / float(xdata.size)\n else:\n return None\n offsets = -lags * distPerLag\n mx = ycorr.argmax()\n ox = offsets[mx]\n return ox\n else:\n return None\n",
"step-4": "\"\"\" Utility functions and classes for SRP\n\nContext : SRP\nModule : Statsistics\nVersion : 1.0.0\nAuthor : Stefano Covino\nDate : 04/04/2013\nE-mail : [email protected]\nURL: : http://www.merate.mi.astro.it/utenti/covino\n\nUsage : to be imported\n\nRemarks : inputs are a 1D vectors to be cross-correlated. Optionally you can\n give a vector of x-axis units. It returns the cross-correlation \n value.\n\nHistory : (04/04/2013) First version.\n\n\"\"\"\n\nimport numpy\n\n\ndef XCorr_1D (data, refdata, xdata=None):\n if data.ndim == 1 and refdata.ndim == 1:\n ycorr = numpy.correlate(data, refdata, mode=\"full\")\n xcorr = numpy.arange(ycorr.size)\n lags = xcorr - (data.size-1)\n if xdata == None:\n distPerLag = 1.\n elif xdata.ndim == 1:\n distPerLag = (xdata[-1] - xdata[0])/float(xdata.size)\n else:\n return None\n #\n offsets = -lags*distPerLag\n #\n mx = ycorr.argmax()\n ox = offsets[mx]\n return ox\n else:\n return None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
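A worked check of XCorr_1D above, using a synthetic pulse shifted by a known amount, makes the returned offset concrete; the data is illustrative. Note that once xdata is an array, the guard must read "xdata is None" (as in the code above), since "== None" on a numpy array compares elementwise and makes the "if" ambiguous.

import numpy

# Synthetic check: the same Gaussian pulse, shifted by a known 2.0 x-units.
x = numpy.linspace(0.0, 10.0, 101)        # ~0.1 x-units per sample
ref = numpy.exp(-(x - 3.0) ** 2 / 0.5)    # pulse centred at x = 3.0
sig = numpy.exp(-(x - 5.0) ** 2 / 0.5)    # same pulse, centred at x = 5.0

# The correlation peak sits at a lag of +20 samples; the function negates it
# and scales by span/size, so this prints roughly -1.98 (about -2.0 x-units).
print(XCorr_1D(sig, ref, x))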
'''
syntax of if-elif-else
if <condition> :
code to be
executed in
this condition
elif <new condition> :
	code tbd
some code
else :
	code runs in the else condition
this can all be multiline code
'''
a = 3
b = 2
if a == b :
print "Values are equal"
elif a < b :
print "a is less than b"
else:
print "b is less than a"
print "jai is awesome"
# attempting to make changes in git
# jai making changes
|
normal
|
{
"blob_id": "d7ce6efa72c9b65d3dd3ce90f9d1f2dd8a889d26",
"index": 444,
"step-1": "\n'''\nsyntax of if-elif-else\n\nif <condition> :\n\tcode to be\n\texecuted in\n\tthis condition\nelif <new condition> :\n\tcdode tbd\n\tsome code\nelse :\n\tcode runs in the else condigtion\n\tthis can all be multiline code\n\n'''\n\n\n\n\na = 3\nb = 2\n\n\nif a == b :\n\tprint \"Values are equal\"\nelif a < b :\n\tprint \"a is less than b\"\nelse:\n\tprint \"b is less than a\"\n\nprint \"jai is awesome\"\n\n\n# attempting to make changes in git\n# jai making changes\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
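The tutorial record above is Python 2 (bare print statements). The same branching chain in Python 3 syntax, plus the one-line conditional-expression form, for contrast:

# Python 3 version of the record's if/elif/else chain.
a = 3
b = 2

if a == b:
    print('Values are equal')
elif a < b:
    print('a is less than b')
else:
    print('b is less than a')

# Conditional expression: compact when each branch is a single value.
print('equal' if a == b else ('a < b' if a < b else 'b < a'))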
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-19 15:17
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Count
from tqdm import tqdm
def remove_duplicate_legal_reasons(apps, purpose_slug, source_object_content_type, source_object_id):
LegalReason = apps.get_model(u'gdpr', u'LegalReason')
duplicate_legal_reason_qs = LegalReason.objects.filter(
purpose_slug=purpose_slug,
source_object_content_type=source_object_content_type,
source_object_id=source_object_id
)
if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:
duplicate_legal_reason_qs.filter(is_active=False).delete()
latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')
duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):
LegalReason = apps.get_model(u'gdpr', u'LegalReason')
check_qs = LegalReason.objects.values(u'purpose_slug', u'source_object_content_type', u'source_object_id').annotate(
lr_count=Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count').distinct()
for legal_reason in tqdm(check_qs.all()):
remove_duplicate_legal_reasons(
apps, legal_reason[u'purpose_slug'], legal_reason[u'source_object_content_type'],
legal_reason[u'source_object_id']
)
def remove_duplicate_legal_reasons_relatives(apps, legal_reason, object_content_type, object_id):
LegalReasonRelatedObject = apps.get_model(u'gdpr', u'LegalReasonRelatedObject')
duplicates_qs = LegalReasonRelatedObject.objects.filter(
legal_reason=legal_reason,
object_content_type=object_content_type,
object_id=object_id
)
latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')
duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps, schema_editor):
LegalReasonRelatedObject = apps.get_model(u'gdpr', u'LegalReasonRelatedObject')
check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason', u'object_content_type', u'object_id').annotate(
lrro_count=Count(u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count').distinct()
for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):
remove_duplicate_legal_reasons_relatives(apps, legal_reason_related_object[u'legal_reason'],
legal_reason_related_object[u'object_content_type'],
legal_reason_related_object[u'object_id']
)
class Migration(migrations.Migration):
dependencies = [
(u'gdpr', u'0002_auto_20180509_1518'),
(u'contenttypes', u'0002_remove_content_type_name'),
]
operations = [
migrations.AlterField(
model_name=u'legalreason',
name=u'purpose_slug',
field=models.CharField(choices=[], db_index=True,
max_length=100, verbose_name=u'purpose'),
),
migrations.AlterField(
model_name=u'legalreason',
name=u'source_object_id',
field=models.TextField(verbose_name=u'source object ID', db_index=True),
),
migrations.AlterField(
model_name=u'legalreasonrelatedobject',
name=u'object_id',
field=models.TextField(verbose_name=u'related object ID', db_index=True),
),
migrations.RunPython(check_uniqueness_and_keep_latest_active_legal_reason),
migrations.RunPython(check_uniqueness_and_keep_latest_active_legal_reason_related_object),
]
|
normal
|
{
"blob_id": "6c86b4823756853bb502b34492ac8ad0a75daf7e",
"index": 7036,
"step-1": "<mask token>\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug,\n source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=\n purpose_slug, source_object_content_type=source_object_content_type,\n source_object_id=source_object_id)\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\n<mask token>\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,\n schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',\n u'object_content_type', u'object_id').annotate(lrro_count=Count(\n u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'\n ).distinct()\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps,\n legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id'])\n\n\nclass Migration(migrations.Migration):\n dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',\n u'0002_remove_content_type_name')]\n operations = [migrations.AlterField(model_name=u'legalreason', name=\n u'purpose_slug', field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose')), migrations.AlterField(\n model_name=u'legalreason', name=u'source_object_id', field=models.\n TextField(verbose_name=u'source object ID', db_index=True)),\n migrations.AlterField(model_name=u'legalreasonrelatedobject', name=\n u'object_id', field=models.TextField(verbose_name=\n u'related object ID', db_index=True)), migrations.RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason), migrations.\n RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason_related_object)]\n",
"step-2": "<mask token>\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug,\n source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=\n purpose_slug, source_object_content_type=source_object_content_type,\n source_object_id=source_object_id)\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\n<mask token>\n\n\ndef remove_duplicate_legal_reasons_relatives(apps, legal_reason,\n object_content_type, object_id):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=\n legal_reason, object_content_type=object_content_type, object_id=\n object_id)\n latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')\n duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,\n schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',\n u'object_content_type', u'object_id').annotate(lrro_count=Count(\n u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'\n ).distinct()\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps,\n legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id'])\n\n\nclass Migration(migrations.Migration):\n dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',\n u'0002_remove_content_type_name')]\n operations = [migrations.AlterField(model_name=u'legalreason', name=\n u'purpose_slug', field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose')), migrations.AlterField(\n model_name=u'legalreason', name=u'source_object_id', field=models.\n TextField(verbose_name=u'source object ID', db_index=True)),\n migrations.AlterField(model_name=u'legalreasonrelatedobject', name=\n u'object_id', field=models.TextField(verbose_name=\n u'related object ID', db_index=True)), migrations.RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason), migrations.\n RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason_related_object)]\n",
"step-3": "<mask token>\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug,\n source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=\n purpose_slug, source_object_content_type=source_object_content_type,\n source_object_id=source_object_id)\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n check_qs = LegalReason.objects.values(u'purpose_slug',\n u'source_object_content_type', u'source_object_id').annotate(lr_count\n =Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count'\n ).distinct()\n for legal_reason in tqdm(check_qs.all()):\n remove_duplicate_legal_reasons(apps, legal_reason[u'purpose_slug'],\n legal_reason[u'source_object_content_type'], legal_reason[\n u'source_object_id'])\n\n\ndef remove_duplicate_legal_reasons_relatives(apps, legal_reason,\n object_content_type, object_id):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=\n legal_reason, object_content_type=object_content_type, object_id=\n object_id)\n latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')\n duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,\n schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',\n u'object_content_type', u'object_id').annotate(lrro_count=Count(\n u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'\n ).distinct()\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps,\n legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id'])\n\n\nclass Migration(migrations.Migration):\n dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',\n u'0002_remove_content_type_name')]\n operations = [migrations.AlterField(model_name=u'legalreason', name=\n u'purpose_slug', field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose')), migrations.AlterField(\n model_name=u'legalreason', name=u'source_object_id', field=models.\n TextField(verbose_name=u'source object ID', db_index=True)),\n migrations.AlterField(model_name=u'legalreasonrelatedobject', name=\n u'object_id', field=models.TextField(verbose_name=\n u'related object ID', db_index=True)), migrations.RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason), migrations.\n RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason_related_object)]\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom django.db import migrations, models\nfrom django.db.models import Count\nfrom tqdm import tqdm\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug,\n source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=\n purpose_slug, source_object_content_type=source_object_content_type,\n source_object_id=source_object_id)\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n check_qs = LegalReason.objects.values(u'purpose_slug',\n u'source_object_content_type', u'source_object_id').annotate(lr_count\n =Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count'\n ).distinct()\n for legal_reason in tqdm(check_qs.all()):\n remove_duplicate_legal_reasons(apps, legal_reason[u'purpose_slug'],\n legal_reason[u'source_object_content_type'], legal_reason[\n u'source_object_id'])\n\n\ndef remove_duplicate_legal_reasons_relatives(apps, legal_reason,\n object_content_type, object_id):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=\n legal_reason, object_content_type=object_content_type, object_id=\n object_id)\n latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')\n duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,\n schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',\n u'object_content_type', u'object_id').annotate(lrro_count=Count(\n u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'\n ).distinct()\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps,\n legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id'])\n\n\nclass Migration(migrations.Migration):\n dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',\n u'0002_remove_content_type_name')]\n operations = [migrations.AlterField(model_name=u'legalreason', name=\n u'purpose_slug', field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose')), migrations.AlterField(\n model_name=u'legalreason', name=u'source_object_id', field=models.\n TextField(verbose_name=u'source object ID', db_index=True)),\n migrations.AlterField(model_name=u'legalreasonrelatedobject', name=\n u'object_id', field=models.TextField(verbose_name=\n u'related object ID', db_index=True)), migrations.RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason), migrations.\n RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason_related_object)]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.16 on 2018-12-19 15:17\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.db.models import Count\nfrom tqdm import tqdm\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug, source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(\n purpose_slug=purpose_slug,\n source_object_content_type=source_object_content_type,\n source_object_id=source_object_id\n )\n\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n check_qs = LegalReason.objects.values(u'purpose_slug', u'source_object_content_type', u'source_object_id').annotate(\n lr_count=Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count').distinct()\n\n for legal_reason in tqdm(check_qs.all()):\n remove_duplicate_legal_reasons(\n apps, legal_reason[u'purpose_slug'], legal_reason[u'source_object_content_type'],\n legal_reason[u'source_object_id']\n )\n\n\ndef remove_duplicate_legal_reasons_relatives(apps, legal_reason, object_content_type, object_id):\n LegalReasonRelatedObject = apps.get_model(u'gdpr', u'LegalReasonRelatedObject')\n duplicates_qs = LegalReasonRelatedObject.objects.filter(\n legal_reason=legal_reason,\n object_content_type=object_content_type,\n object_id=object_id\n )\n latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')\n duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps, schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr', u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason', u'object_content_type', u'object_id').annotate(\n lrro_count=Count(u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count').distinct()\n\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps, legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id']\n )\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (u'gdpr', u'0002_auto_20180509_1518'),\n (u'contenttypes', u'0002_remove_content_type_name'),\n ]\n operations = [\n migrations.AlterField(\n model_name=u'legalreason',\n name=u'purpose_slug',\n field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose'),\n ),\n migrations.AlterField(\n model_name=u'legalreason',\n name=u'source_object_id',\n field=models.TextField(verbose_name=u'source object ID', db_index=True),\n ),\n migrations.AlterField(\n model_name=u'legalreasonrelatedobject',\n name=u'object_id',\n field=models.TextField(verbose_name=u'related object ID', db_index=True),\n ),\n migrations.RunPython(check_uniqueness_and_keep_latest_active_legal_reason),\n migrations.RunPython(check_uniqueness_and_keep_latest_active_legal_reason_related_object),\n ]\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(B):
p1 = 0.0
for j in range(N1):
if rnd.uniform(0, 1) < p1mle:
p1 += 1
p1 /= N1
p2 = 0.0
for j in range(N2):
if rnd.uniform(0, 1) < p2mle:
p2 += 1
p2 /= N2
estimate.append(p2 - p1)
<|reserved_special_token_0|>
for t in allt:
cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)
if np.abs(target - cur) < tol:
print(t)
print(cur)
break
<|reserved_special_token_1|>
<|reserved_special_token_0|>
B = 100000
N1 = 50
N2 = 50
p1mle = 0.3
p2mle = 0.4
taumle = p2mle - p1mle
estimate = []
for i in range(B):
p1 = 0.0
for j in range(N1):
if rnd.uniform(0, 1) < p1mle:
p1 += 1
p1 /= N1
p2 = 0.0
for j in range(N2):
if rnd.uniform(0, 1) < p2mle:
p2 += 1
p2 /= N2
estimate.append(p2 - p1)
t = -10
estimate = np.array(estimate)
allt = [(0.01 * t) for t in range(-5000, 5000)]
target = 0.95
tol = 0.01
for t in allt:
cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)
if np.abs(target - cur) < tol:
print(t)
print(cur)
break
<|reserved_special_token_1|>
import numpy.random as rnd
import numpy as np
B = 100000
N1 = 50
N2 = 50
p1mle = 0.3
p2mle = 0.4
taumle = p2mle - p1mle
estimate = []
for i in range(B):
p1 = 0.0
for j in range(N1):
if rnd.uniform(0, 1) < p1mle:
p1 += 1
p1 /= N1
p2 = 0.0
for j in range(N2):
if rnd.uniform(0, 1) < p2mle:
p2 += 1
p2 /= N2
estimate.append(p2 - p1)
t = -10
estimate = np.array(estimate)
allt = [(0.01 * t) for t in range(-5000, 5000)]
target = 0.95
tol = 0.01
for t in allt:
cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)
if np.abs(target - cur) < tol:
print(t)
print(cur)
break
<|reserved_special_token_1|>
import numpy.random as rnd
import numpy as np
B=100000
N1=50
N2=50
p1mle=0.3
p2mle=0.4
taumle=p2mle-p1mle
estimate=[]
for i in range(B):
p1=0.0
for j in range(N1):
if(rnd.uniform(0,1)<p1mle):
p1+=1
p1/=N1
p2=0.0
for j in range(N2):
if(rnd.uniform(0,1)<p2mle):
p2+=1
p2/=N2
estimate.append(p2-p1)
t=-10
estimate=np.array(estimate)
allt=[0.01*t for t in range(-5000,5000)]
target=0.95
tol=0.01
for t in allt:
cur=np.mean(np.sqrt(N1+N2)*(estimate-taumle)<t)
if(np.abs(target-cur)<tol):
print(t)
print(cur)
break
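# Hedged aside (not part of the original script): the same parametric
# bootstrap can be vectorised with numpy, replacing the O(B*N) Python loops.
# A Binomial(N, p) draw divided by N is exactly the sample proportion the
# inner loops compute; the values below mirror the script above.
import numpy.random as rnd
import numpy as np

B, N1, N2 = 100000, 50, 50
p1mle, p2mle = 0.3, 0.4
taumle = p2mle - p1mle
p1_hat = rnd.binomial(N1, p1mle, size=B) / N1
p2_hat = rnd.binomial(N2, p2mle, size=B) / N2
stat = np.sqrt(N1 + N2) * (p2_hat - p1_hat - taumle)
# The grid search above is looking for this quantile directly.
print(np.quantile(stat, 0.95))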
|
flexible
|
{
"blob_id": "0db0daf9bea254cffaec1280cd13b2d70368cd94",
"index": 289,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\n<mask token>\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-3": "<mask token>\nB = 100000\nN1 = 50\nN2 = 50\np1mle = 0.3\np2mle = 0.4\ntaumle = p2mle - p1mle\nestimate = []\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\nt = -10\nestimate = np.array(estimate)\nallt = [(0.01 * t) for t in xrange(-5000, 5000)]\ntarget = 0.95\ntol = 0.01\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-4": "import numpy.random as rnd\nimport numpy as np\nB = 100000\nN1 = 50\nN2 = 50\np1mle = 0.3\np2mle = 0.4\ntaumle = p2mle - p1mle\nestimate = []\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\nt = -10\nestimate = np.array(estimate)\nallt = [(0.01 * t) for t in xrange(-5000, 5000)]\ntarget = 0.95\ntol = 0.01\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-5": "import numpy.random as rnd\nimport numpy as np\n\nB=100000\nN1=50\nN2=50\n\np1mle=0.3\t\np2mle=0.4\ntaumle=p2mle-p1mle\n\nestimate=[]\n\nfor i in range(B):\n\n\tp1=0.0\n\tfor j in range(N1):\n\t\tif(rnd.uniform(0,1)<p1mle):\n\t\t\tp1+=1\n\n\tp1/=N1\n\n\tp2=0.0\n\tfor j in range(N2):\n\t\tif(rnd.uniform(0,1)<p2mle):\n\t\t\tp2+=1\n\n\tp2/=N2\n\n\testimate.append(p2-p1)\n\nt=-10\n\nestimate=np.array(estimate)\n\nallt=[0.01*t for t in xrange(-5000,5000)]\n\ntarget=0.95\ntol=0.01\n\nfor t in allt:\n\tcur=np.mean(np.sqrt(N1+N2)*(estimate-taumle)<t)\n\tif(np.abs(target-cur)<tol):\n\t\tprint(t)\n\t\tprint(cur)\n\t\tbreak",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if subject == 'Electronic Information Engineering' and age > 25 or subject == 'Electronic Information Engineering' and college == 'yes' or age < 28 and subject == 'Computer Science':
    print('Congratulations, you have been admitted!')
else:
    print('Sorry, you did not meet the interview requirements')
<|reserved_special_token_1|>
age = int(input('Please enter your age: '))
subject = input('Please enter your major: ')
college = input('Did you graduate from a key university? (yes/no): ')
if subject == 'Electronic Information Engineering' and age > 25 or subject == 'Electronic Information Engineering' and college == 'yes' or age < 28 and subject == 'Computer Science':
    print('Congratulations, you have been admitted!')
else:
    print('Sorry, you did not meet the interview requirements')
<|reserved_special_token_1|>
age=int(input('Please enter your age: '))
subject=input('Please enter your major: ')
college=input('Did you graduate from a key university? (yes/no): ')
if (subject=='Electronic Information Engineering' and age>25) or (subject=='Electronic Information Engineering' and college=='yes') or (age<28 and subject=='Computer Science'):
    print('Congratulations, you have been admitted!')
else:
    print('Sorry, you did not meet the interview requirements')
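# Hedged note (illustration only): Python's `and` binds tighter than `or`, so
# the unparenthesised condition in the earlier variants is equivalent to the
# fully parenthesised form above. A quick exhaustive check:
for a in (True, False):
    for b in (True, False):
        for c in (True, False):
            assert (a and b or c) == ((a and b) or c)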
|
flexible
|
{
"blob_id": "4282303e3e6ee122f1379bea73c619870f983f61",
"index": 8580,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif subject == '电子信息工程' and age > 25 or subject == '电子信息工程' and college == '是' or age < 28 and subject == '计算机':\n print('恭喜您被录取!')\nelse:\n print('抱歉,您未达到面试要求')\n",
"step-3": "age = int(input('请输入您的年龄:'))\nsubject = input('请输入您的专业:')\ncollege = input('请输入您是否毕业于重点大学:(是/不是)')\nif subject == '电子信息工程' and age > 25 or subject == '电子信息工程' and college == '是' or age < 28 and subject == '计算机':\n print('恭喜您被录取!')\nelse:\n print('抱歉,您未达到面试要求')\n",
"step-4": "age=int(input('请输入您的年龄:'))\nsubject=input('请输入您的专业:')\ncollege=input('请输入您是否毕业于重点大学:(是/不是)')\nif (subject=='电子信息工程' and age>25) or (subject=='电子信息工程' and college=='是') or (age<28 and subject=='计算机'):\n print('恭喜您被录取!')\nelse:\n print('抱歉,您未达到面试要求')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from manim import *
class SlidingDoorIllustration(Scene):
def construct(self):
waiting_room = Rectangle(color=BLUE, stroke_width=8)
waiting_room.shift(LEFT + DOWN)
workspace = Rectangle(color=BLUE, stroke_width=8)
workspace.next_to(waiting_room, RIGHT + UP, buff=0)
workspace.shift(LEFT)
t1 = Text("Waiting Room").move_to(waiting_room.get_center()).scale(0.5)
t2 = Text("Workspace").move_to(workspace.get_center()).scale(0.5)
doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.get_corner(UR), color=RED, stroke_width=8)
door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR), color=GREEN, stroke_width=8)
self.add(waiting_room, workspace, t1, t2, doors, door)
self.play(door.animate.shift(LEFT))
self.wait()
self.play(door.animate.shift(RIGHT))
self.wait()
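# Hedged usage note: with Manim Community installed, the scene above renders
# from the command line (the file name here is an assumption):
#
#     manim -pql sliding_door.py SlidingDoorIllustration
#
# -p previews the finished video; -ql selects low quality for fast iteration.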
|
normal
|
{
"blob_id": "e93d5461a2604d3b8015489397c68e16d1cb222e",
"index": 3695,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SlidingDoorIllustration(Scene):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SlidingDoorIllustration(Scene):\n\n def construct(self):\n waiting_room = Rectangle(color=BLUE, stroke_width=8)\n waiting_room.shift(LEFT + DOWN)\n workspace = Rectangle(color=BLUE, stroke_width=8)\n workspace.next_to(waiting_room, RIGHT + UP, buff=0)\n workspace.shift(LEFT)\n t1 = Text('Waiting Room').move_to(waiting_room.get_center()).scale(0.5)\n t2 = Text('Workspace').move_to(workspace.get_center()).scale(0.5)\n doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.\n get_corner(UR), color=RED, stroke_width=8)\n door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR),\n color=GREEN, stroke_width=8)\n self.add(waiting_room, workspace, t1, t2, doors, door)\n self.play(door.animate.shift(LEFT))\n self.wait()\n self.play(door.animate.shift(RIGHT))\n self.wait()\n",
"step-4": "from manim import *\n\n\nclass SlidingDoorIllustration(Scene):\n\n def construct(self):\n waiting_room = Rectangle(color=BLUE, stroke_width=8)\n waiting_room.shift(LEFT + DOWN)\n workspace = Rectangle(color=BLUE, stroke_width=8)\n workspace.next_to(waiting_room, RIGHT + UP, buff=0)\n workspace.shift(LEFT)\n t1 = Text('Waiting Room').move_to(waiting_room.get_center()).scale(0.5)\n t2 = Text('Workspace').move_to(workspace.get_center()).scale(0.5)\n doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.\n get_corner(UR), color=RED, stroke_width=8)\n door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR),\n color=GREEN, stroke_width=8)\n self.add(waiting_room, workspace, t1, t2, doors, door)\n self.play(door.animate.shift(LEFT))\n self.wait()\n self.play(door.animate.shift(RIGHT))\n self.wait()\n",
"step-5": "from manim import *\n\n\nclass SlidingDoorIllustration(Scene):\n def construct(self):\n waiting_room = Rectangle(color=BLUE, stroke_width=8)\n waiting_room.shift(LEFT + DOWN)\n workspace = Rectangle(color=BLUE, stroke_width=8)\n workspace.next_to(waiting_room, RIGHT + UP, buff=0)\n workspace.shift(LEFT)\n t1 = Text(\"Waiting Room\").move_to(waiting_room.get_center()).scale(0.5)\n t2 = Text(\"Workspace\").move_to(workspace.get_center()).scale(0.5)\n doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.get_corner(UR), color=RED, stroke_width=8)\n door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR), color=GREEN, stroke_width=8)\n self.add(waiting_room, workspace, t1, t2, doors, door)\n self.play(door.animate.shift(LEFT))\n self.wait()\n self.play(door.animate.shift(RIGHT))\n self.wait()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
"""
With selenium we need web driver for our browser.
If you use google chrome, you can download chrome driver from here:
http://chromedriver.chromium.org/downloads
In linux (my OS) I extracted downloaded zip file and placed
exe file in "/home/UserName/bin"
I did this in order not to write chrome driver path everytime
"""
# IF you did not locate exe file in user/bin or user/local/bin
# then you have to specify the driver path while creating driver object
# driver object is browser which you can programatically control
driver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')
# open some page using get method
driver.get('https://www.facebook.com')
# driver.page_source
# Opens facebook's source html file
soup = BeautifulSoup(driver.page_source,'lxml')
print(soup.prettify())
# close webdriver object
driver.close()
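# Hedged variant (assumes a Selenium release that accepts the `options`
# keyword): the same scrape run headless, so no browser window opens.
from selenium import webdriver
from bs4 import BeautifulSoup

options = webdriver.ChromeOptions()
options.add_argument('--headless')  # run Chrome without a visible UI
driver = webdriver.Chrome(options=options)
driver.get('https://www.facebook.com')
soup = BeautifulSoup(driver.page_source, 'lxml')
print(soup.title)
driver.quit()  # quit() shuts down the driver and all windows, unlike close()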
|
normal
|
{
"blob_id": "03b2b722832eb46f3f81618f70fd0475f1f08c94",
"index": 2997,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://www.facebook.com')\n<mask token>\nprint(soup.prettify())\ndriver.close()\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')\ndriver.get('https://www.facebook.com')\nsoup = BeautifulSoup(driver.page_source, 'lxml')\nprint(soup.prettify())\ndriver.close()\n",
"step-4": "from selenium import webdriver\nfrom time import sleep\nfrom bs4 import BeautifulSoup\n<mask token>\ndriver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')\ndriver.get('https://www.facebook.com')\nsoup = BeautifulSoup(driver.page_source, 'lxml')\nprint(soup.prettify())\ndriver.close()\n",
"step-5": "\n\n\nfrom selenium import webdriver\nfrom time import sleep\nfrom bs4 import BeautifulSoup\n\n\n\n\n\"\"\"\n\nWith selenium we need web driver for our browser.\nIf you use google chrome, you can download chrome driver from here:\n \nhttp://chromedriver.chromium.org/downloads\n\n\nIn linux (my OS) I extracted downloaded zip file and placed\nexe file in \"/home/UserName/bin\"\n\n\nI did this in order not to write chrome driver path everytime\n\n\n\"\"\"\n\n# IF you did not locate exe file in user/bin or user/local/bin\n# then you have to specify the driver path while creating driver object\n# driver object is browser which you can programatically control\ndriver = webdriver.Chrome('/Users/UserName/Downloads/chromedriver')\n\n\n\n# open some page using get method\ndriver.get('https://www.facebook.com')\n\n\n# driver.page_source\n\n# Opens facebook's source html file\nsoup = BeautifulSoup(driver.page_source,'lxml')\n\nprint(soup.prettify())\n\n\n\n# close webdriver object\ndriver.close()\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def XCorr_1D(data, refdata, xdata=None):
if data.ndim == 1 and refdata.ndim == 1:
ycorr = numpy.correlate(data, refdata, mode='full')
xcorr = numpy.arange(ycorr.size)
lags = xcorr - (data.size - 1)
        if xdata is None:
distPerLag = 1.0
elif xdata.ndim == 1:
distPerLag = (xdata[-1] - xdata[0]) / float(xdata.size)
else:
return None
offsets = -lags * distPerLag
mx = ycorr.argmax()
ox = offsets[mx]
return ox
else:
return None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy
def XCorr_1D(data, refdata, xdata=None):
if data.ndim == 1 and refdata.ndim == 1:
ycorr = numpy.correlate(data, refdata, mode='full')
xcorr = numpy.arange(ycorr.size)
lags = xcorr - (data.size - 1)
        if xdata is None:
distPerLag = 1.0
elif xdata.ndim == 1:
distPerLag = (xdata[-1] - xdata[0]) / float(xdata.size)
else:
return None
offsets = -lags * distPerLag
mx = ycorr.argmax()
ox = offsets[mx]
return ox
else:
return None
<|reserved_special_token_1|>
""" Utility functions and classes for SRP
Context : SRP
Module : Statsistics
Version : 1.0.0
Author : Stefano Covino
Date : 04/04/2013
E-mail : [email protected]
URL: : http://www.merate.mi.astro.it/utenti/covino
Usage : to be imported
Remarks : inputs are a 1D vectors to be cross-correlated. Optionally you can
give a vector of x-axis units. It returns the cross-correlation
value.
History : (04/04/2013) First version.
"""
import numpy
def XCorr_1D (data, refdata, xdata=None):
if data.ndim == 1 and refdata.ndim == 1:
ycorr = numpy.correlate(data, refdata, mode="full")
xcorr = numpy.arange(ycorr.size)
lags = xcorr - (data.size-1)
        if xdata is None:
distPerLag = 1.
elif xdata.ndim == 1:
distPerLag = (xdata[-1] - xdata[0])/float(xdata.size)
else:
return None
#
offsets = -lags*distPerLag
#
mx = ycorr.argmax()
ox = offsets[mx]
return ox
else:
return None
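# Hedged usage sketch (not part of the SRP module): recover a known offset
# between two 1-D signals. With the sign convention above, the return value
# is the shift to apply to `data` to overlay it on `refdata`.
import numpy

x = numpy.linspace(0.0, 10.0, 1000)
ref = numpy.exp(-(x - 4.0) ** 2)   # pulse centred at x = 4
sig = numpy.exp(-(x - 6.0) ** 2)   # same pulse centred at x = 6
print(XCorr_1D(sig, ref, x))       # approximately -2.0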
|
flexible
|
{
"blob_id": "c62ffcaa9095d772e51be086be349d200346bc22",
"index": 9662,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef XCorr_1D(data, refdata, xdata=None):\n if data.ndim == 1 and refdata.ndim == 1:\n ycorr = numpy.correlate(data, refdata, mode='full')\n xcorr = numpy.arange(ycorr.size)\n lags = xcorr - (data.size - 1)\n if xdata == None:\n distPerLag = 1.0\n elif xdata.ndim == 1:\n distPerLag = (xdata[-1] - xdata[0]) / float(xdata.size)\n else:\n return None\n offsets = -lags * distPerLag\n mx = ycorr.argmax()\n ox = offsets[mx]\n return ox\n else:\n return None\n",
"step-3": "<mask token>\nimport numpy\n\n\ndef XCorr_1D(data, refdata, xdata=None):\n if data.ndim == 1 and refdata.ndim == 1:\n ycorr = numpy.correlate(data, refdata, mode='full')\n xcorr = numpy.arange(ycorr.size)\n lags = xcorr - (data.size - 1)\n if xdata == None:\n distPerLag = 1.0\n elif xdata.ndim == 1:\n distPerLag = (xdata[-1] - xdata[0]) / float(xdata.size)\n else:\n return None\n offsets = -lags * distPerLag\n mx = ycorr.argmax()\n ox = offsets[mx]\n return ox\n else:\n return None\n",
"step-4": "\"\"\" Utility functions and classes for SRP\n\nContext : SRP\nModule : Statsistics\nVersion : 1.0.0\nAuthor : Stefano Covino\nDate : 04/04/2013\nE-mail : [email protected]\nURL: : http://www.merate.mi.astro.it/utenti/covino\n\nUsage : to be imported\n\nRemarks : inputs are a 1D vectors to be cross-correlated. Optionally you can\n give a vector of x-axis units. It returns the cross-correlation \n value.\n\nHistory : (04/04/2013) First version.\n\n\"\"\"\n\nimport numpy\n\n\ndef XCorr_1D (data, refdata, xdata=None):\n if data.ndim == 1 and refdata.ndim == 1:\n ycorr = numpy.correlate(data, refdata, mode=\"full\")\n xcorr = numpy.arange(ycorr.size)\n lags = xcorr - (data.size-1)\n if xdata == None:\n distPerLag = 1.\n elif xdata.ndim == 1:\n distPerLag = (xdata[-1] - xdata[0])/float(xdata.size)\n else:\n return None\n #\n offsets = -lags*distPerLag\n #\n mx = ycorr.argmax()\n ox = offsets[mx]\n return ox\n else:\n return None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class EdgeListError(ValueError):
pass
<|reserved_special_token_0|>
class AdjacencyMatrixError(ValueError):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EdgeListError(ValueError):
pass
<|reserved_special_token_0|>
class AdjacencyMatrixError(ValueError):
pass
def check_adj_matrix(adj_matrix):
"""Checks that the input adjacency matrix is valid."""
if adj_matrix.ndim != 2:
raise AdjacencyMatrixError('The numpy array must be of dimension 2.')
if adj_matrix.shape[0] != adj_matrix.shape[1]:
raise AdjacencyMatrixError('The matrix must be squared.')
<|reserved_special_token_0|>
def wv_to_numpy_array(wv):
vocab_keys = [int(key) for key in wv.vocab.keys()]
embeddings = [wv[str(key)] for key in sorted(vocab_keys)]
return np.array(embeddings, dtype=np.float32)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EdgeListError(ValueError):
pass
def check_edge_list(src_nodes, dst_nodes, edge_weights):
"""Checks that the input edge list is valid."""
if len(src_nodes) != len(dst_nodes):
raise EdgeListError('src_nodes and dst_nodes must be of same length.')
if edge_weights is None:
return
if len(edge_weights) != len(src_nodes):
raise EdgeListError(
'src_nodes and edge_weights must be of same length.')
class AdjacencyMatrixError(ValueError):
pass
def check_adj_matrix(adj_matrix):
"""Checks that the input adjacency matrix is valid."""
if adj_matrix.ndim != 2:
raise AdjacencyMatrixError('The numpy array must be of dimension 2.')
if adj_matrix.shape[0] != adj_matrix.shape[1]:
raise AdjacencyMatrixError('The matrix must be squared.')
<|reserved_special_token_0|>
def wv_to_numpy_array(wv):
vocab_keys = [int(key) for key in wv.vocab.keys()]
embeddings = [wv[str(key)] for key in sorted(vocab_keys)]
return np.array(embeddings, dtype=np.float32)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EdgeListError(ValueError):
pass
def check_edge_list(src_nodes, dst_nodes, edge_weights):
"""Checks that the input edge list is valid."""
if len(src_nodes) != len(dst_nodes):
raise EdgeListError('src_nodes and dst_nodes must be of same length.')
if edge_weights is None:
return
if len(edge_weights) != len(src_nodes):
raise EdgeListError(
'src_nodes and edge_weights must be of same length.')
class AdjacencyMatrixError(ValueError):
pass
def check_adj_matrix(adj_matrix):
"""Checks that the input adjacency matrix is valid."""
if adj_matrix.ndim != 2:
raise AdjacencyMatrixError('The numpy array must be of dimension 2.')
if adj_matrix.shape[0] != adj_matrix.shape[1]:
raise AdjacencyMatrixError('The matrix must be squared.')
def is_symmetric(matrix):
return np.array_equal(matrix, matrix.T)
def wv_to_numpy_array(wv):
vocab_keys = [int(key) for key in wv.vocab.keys()]
embeddings = [wv[str(key)] for key in sorted(vocab_keys)]
return np.array(embeddings, dtype=np.float32)
<|reserved_special_token_1|>
import numpy as np
class EdgeListError(ValueError):
pass
def check_edge_list(src_nodes, dst_nodes, edge_weights):
"""Checks that the input edge list is valid."""
if len(src_nodes) != len(dst_nodes):
raise EdgeListError("src_nodes and dst_nodes must be of same length.")
if edge_weights is None:
return
if len(edge_weights) != len(src_nodes):
raise EdgeListError("src_nodes and edge_weights must be of same length.")
class AdjacencyMatrixError(ValueError):
pass
def check_adj_matrix(adj_matrix):
"""Checks that the input adjacency matrix is valid."""
if adj_matrix.ndim != 2:
raise AdjacencyMatrixError("The numpy array must be of dimension 2.")
if adj_matrix.shape[0] != adj_matrix.shape[1]:
raise AdjacencyMatrixError("The matrix must be squared.")
def is_symmetric(matrix):
return np.array_equal(matrix, matrix.T)
def wv_to_numpy_array(wv):
vocab_keys = [int(key) for key in wv.vocab.keys()]
embeddings = [wv[str(key)] for key in sorted(vocab_keys)]
return np.array(embeddings, dtype=np.float32)
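# Hedged usage sketch (toy data invented for illustration): validating a
# small triangle graph with the helpers above.
src = [0, 1, 2]
dst = [1, 2, 0]
check_edge_list(src, dst, edge_weights=None)  # equal lengths: no exception

adj = np.array([[0, 1, 1],
                [1, 0, 1],
                [1, 1, 0]])
check_adj_matrix(adj)       # 2-D and square: no exception
print(is_symmetric(adj))    # True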
|
flexible
|
{
"blob_id": "cdbc7d703da69adaef593e6a505be25d78beb7ce",
"index": 7815,
"step-1": "<mask token>\n\n\nclass EdgeListError(ValueError):\n pass\n\n\n<mask token>\n\n\nclass AdjacencyMatrixError(ValueError):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EdgeListError(ValueError):\n pass\n\n\n<mask token>\n\n\nclass AdjacencyMatrixError(ValueError):\n pass\n\n\ndef check_adj_matrix(adj_matrix):\n \"\"\"Checks that the input adjacency matrix is valid.\"\"\"\n if adj_matrix.ndim != 2:\n raise AdjacencyMatrixError('The numpy array must be of dimension 2.')\n if adj_matrix.shape[0] != adj_matrix.shape[1]:\n raise AdjacencyMatrixError('The matrix must be squared.')\n\n\n<mask token>\n\n\ndef wv_to_numpy_array(wv):\n vocab_keys = [int(key) for key in wv.vocab.keys()]\n embeddings = [wv[str(key)] for key in sorted(vocab_keys)]\n return np.array(embeddings, dtype=np.float32)\n",
"step-3": "<mask token>\n\n\nclass EdgeListError(ValueError):\n pass\n\n\ndef check_edge_list(src_nodes, dst_nodes, edge_weights):\n \"\"\"Checks that the input edge list is valid.\"\"\"\n if len(src_nodes) != len(dst_nodes):\n raise EdgeListError('src_nodes and dst_nodes must be of same length.')\n if edge_weights is None:\n return\n if len(edge_weights) != len(src_nodes):\n raise EdgeListError(\n 'src_nodes and edge_weights must be of same length.')\n\n\nclass AdjacencyMatrixError(ValueError):\n pass\n\n\ndef check_adj_matrix(adj_matrix):\n \"\"\"Checks that the input adjacency matrix is valid.\"\"\"\n if adj_matrix.ndim != 2:\n raise AdjacencyMatrixError('The numpy array must be of dimension 2.')\n if adj_matrix.shape[0] != adj_matrix.shape[1]:\n raise AdjacencyMatrixError('The matrix must be squared.')\n\n\n<mask token>\n\n\ndef wv_to_numpy_array(wv):\n vocab_keys = [int(key) for key in wv.vocab.keys()]\n embeddings = [wv[str(key)] for key in sorted(vocab_keys)]\n return np.array(embeddings, dtype=np.float32)\n",
"step-4": "<mask token>\n\n\nclass EdgeListError(ValueError):\n pass\n\n\ndef check_edge_list(src_nodes, dst_nodes, edge_weights):\n \"\"\"Checks that the input edge list is valid.\"\"\"\n if len(src_nodes) != len(dst_nodes):\n raise EdgeListError('src_nodes and dst_nodes must be of same length.')\n if edge_weights is None:\n return\n if len(edge_weights) != len(src_nodes):\n raise EdgeListError(\n 'src_nodes and edge_weights must be of same length.')\n\n\nclass AdjacencyMatrixError(ValueError):\n pass\n\n\ndef check_adj_matrix(adj_matrix):\n \"\"\"Checks that the input adjacency matrix is valid.\"\"\"\n if adj_matrix.ndim != 2:\n raise AdjacencyMatrixError('The numpy array must be of dimension 2.')\n if adj_matrix.shape[0] != adj_matrix.shape[1]:\n raise AdjacencyMatrixError('The matrix must be squared.')\n\n\ndef is_symmetric(matrix):\n return np.array_equal(matrix, matrix.T)\n\n\ndef wv_to_numpy_array(wv):\n vocab_keys = [int(key) for key in wv.vocab.keys()]\n embeddings = [wv[str(key)] for key in sorted(vocab_keys)]\n return np.array(embeddings, dtype=np.float32)\n",
"step-5": "import numpy as np\n\n\nclass EdgeListError(ValueError):\n pass\n\n\ndef check_edge_list(src_nodes, dst_nodes, edge_weights):\n \"\"\"Checks that the input edge list is valid.\"\"\"\n\n if len(src_nodes) != len(dst_nodes):\n raise EdgeListError(\"src_nodes and dst_nodes must be of same length.\")\n\n if edge_weights is None:\n return\n\n if len(edge_weights) != len(src_nodes):\n raise EdgeListError(\"src_nodes and edge_weights must be of same length.\")\n\n\nclass AdjacencyMatrixError(ValueError):\n pass\n\n\ndef check_adj_matrix(adj_matrix):\n \"\"\"Checks that the input adjacency matrix is valid.\"\"\"\n if adj_matrix.ndim != 2:\n raise AdjacencyMatrixError(\"The numpy array must be of dimension 2.\")\n\n if adj_matrix.shape[0] != adj_matrix.shape[1]:\n raise AdjacencyMatrixError(\"The matrix must be squared.\")\n\n\ndef is_symmetric(matrix):\n return np.array_equal(matrix, matrix.T)\n\n\ndef wv_to_numpy_array(wv):\n vocab_keys = [int(key) for key in wv.vocab.keys()]\n embeddings = [wv[str(key)] for key in sorted(vocab_keys)]\n return np.array(embeddings, dtype=np.float32)\n",
"step-ids": [
2,
4,
5,
6,
8
]
}
|
[
2,
4,
5,
6,
8
] |
import os
import json
import codecs
import markdown
from flask import current_app
def get_json_file(filename, lang='en'):
"""
Get the contents of a JSON file.
"""
filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)
with open(filepath, 'r') as f:
return json.loads(f.read())
def get_markdown_file(name, lang='en'):
"""
Get the contents of a markdown file.
"""
filename_temp = "{0}_{1}.markdown"
md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')
filepath = os.path.join(md_dir, filename_temp.format(name, lang))
if not os.path.isfile(filepath) and lang == 'fr':
filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))
if not os.path.isfile(filepath):
return None
with codecs.open(filepath, mode='r', encoding="utf-8") as f:
return markdown.markdown(f.read())
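# Hedged usage sketch: both helpers read current_app.config['APP_PATH'], so
# they need an active application context. The app name and paths below are
# assumptions, not part of the original module.
from flask import Flask

app = Flask(__name__)
app.config['APP_PATH'] = '/srv/myapp'

with app.app_context():
    config = get_json_file('config.json')         # /srv/myapp/data/config.json
    page = get_markdown_file('about', lang='fr')  # falls back to about_en.markdown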
|
normal
|
{
"blob_id": "213ab22a269abc8180524462a8966e5d929ef7d1",
"index": 322,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n filename_temp = '{0}_{1}.markdown'\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n if not os.path.isfile(filepath):\n return None\n with codecs.open(filepath, mode='r', encoding='utf-8') as f:\n return markdown.markdown(f.read())\n",
"step-3": "<mask token>\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n filename_temp = '{0}_{1}.markdown'\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n if not os.path.isfile(filepath):\n return None\n with codecs.open(filepath, mode='r', encoding='utf-8') as f:\n return markdown.markdown(f.read())\n",
"step-4": "import os\nimport json\nimport codecs\nimport markdown\nfrom flask import current_app\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n filename_temp = '{0}_{1}.markdown'\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n if not os.path.isfile(filepath):\n return None\n with codecs.open(filepath, mode='r', encoding='utf-8') as f:\n return markdown.markdown(f.read())\n",
"step-5": "import os\nimport json\nimport codecs\n\nimport markdown\n\nfrom flask import current_app\n\n\ndef get_json_file(filename, lang='en'):\n \"\"\"\n Get the contents of a JSON file.\n \"\"\"\n\n filepath = os.path.join(current_app.config['APP_PATH'], 'data', filename)\n\n with open(filepath, 'r') as f:\n return json.loads(f.read())\n\n\ndef get_markdown_file(name, lang='en'):\n \"\"\"\n Get the contents of a markdown file.\n \"\"\"\n\n filename_temp = \"{0}_{1}.markdown\"\n\n md_dir = os.path.join(current_app.config['APP_PATH'], 'markdown')\n\n filepath = os.path.join(md_dir, filename_temp.format(name, lang))\n\n if not os.path.isfile(filepath) and lang == 'fr':\n filepath = os.path.join(md_dir, filename_temp.format(name, 'en'))\n\n if not os.path.isfile(filepath):\n return None\n\n with codecs.open(filepath, mode='r', encoding=\"utf-8\") as f:\n return markdown.markdown(f.read())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from api import url, key, opposite
import requests
import json
import time
import os
from miner import mine
from cpu import *
class Player:
def __init__(self):
data = self._get_status()
time.sleep(data['cooldown'])
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.current_room = self.check_room()
self.world = "dark" if self.current_room['room_id'] > 499 else "light"
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def _get_status(self):
r = requests.post(f"{url}/api/adv/status/",
headers={'Authorization': f"Token {key}", "Content-Type": "application/json"})
return r.json()
def _read_file(self, filepath):
if self.world == 'dark':
filepath = 'dark_' + filepath
        if not os.path.exists(filepath):
            room = self.current_room
            if 'graph' in filepath:
                # seed the graph with the starting room's unexplored exits
                data = {room['room_id']: {d: '?' for d in room['exits']}}
            else:
                data = {room['room_id']: room}
            self._write_file(filepath, data)
with open(filepath, 'r') as f:
data = json.load(f)
return data
def _write_file(self, filepath, data):
if self.world == 'dark' and 'dark' not in filepath:
filepath = 'dark_' + filepath
with open(filepath, 'w+') as outfile:
json.dump(data, outfile)
def check_room(self):
r = requests.get(f"{url}/api/adv/init/",
headers={'Authorization': f"Token {key}"})
data = r.json()
if 'players' in data:
del data['players']
return data
def check_self(self, cause=None):
data = self._get_status()
cleaned = {**data} # How cool is the spread operator!
cleaned['status'].append("Glasowyn's hands stand Empty and Effervescent, see them filled.") if len(
cleaned['status']) < 1 else None
cleaned["world"] = self.world
cut = ['has_mined', 'errors', ]
for k in cut:
del cleaned[k]
if cause == "item pick up":
ret = f" You are now held down by the weight of {cleaned['encumbrance']} Stones.\n Your Experience and equipment Grant you the ability to\n carry {cleaned['strength']} stones before you need to take longer rests.\n Your bag now carries {cleaned['inventory']}"
print(ret + f"\n Your ghost seems to have the space to carry an additional item if you would like" if "carry" in cleaned['abilities'] and len(
cleaned['status']) else ret)
else:
print('\n'+"*"*22+' '+"Your Current State"+' '+"*"*22)
for item in cleaned.items():
print(f"{item[0]}: {item[1]}")
print("*"*64+'\n')
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def dash(self, direction, num_rooms, room_ids):
if "dash" not in self.abilities:
print("Error! You can't dash yet!")
return
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print("\n======================================")
print(f"Dashing {direction} from room {curr_id}...")
json = {"direction": direction,
"num_rooms": num_rooms, "next_room_ids": room_ids}
r = requests.post(f"{url}/api/adv/dash/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json)
next_room = r.json()
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
# update map with room info
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
# change current room and update cooldown
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world == 'dark' and 'golden snitch' in next_room['items']:
try:
self.pick_up_loot('golden snitch')
except:
print("Somebody already got that snitch!")
elif self.world == 'light' and len(next_room['items']):
for item in next_room['items']:
self.pick_up_loot(item)
for message in next_room['messages']:
print(f"{message}")
print(f"Now the player is in {self.current_room['room_id']}")
print(f"Cooldown before next action: {self.cooldown} seconds")
print("======================================\n")
def travel(self, direction, method="move"):
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print("\n======================================")
if "fly" in self.abilities and self.map[str(curr_id)]['terrain'] in ['MOUNTAIN', 'NORMAL']:
method = "fly"
print(f"Flying {direction} from room {curr_id}...")
else:
print(f"Walking {direction} from room {curr_id}...")
if direction not in self.graph[str(curr_id)]:
print("Error! Not a valid direction from the current room")
else:
json = {"direction": direction}
if self.graph[str(curr_id)][direction] != "?":
json['next_room_id'] = str(self.graph[str(curr_id)][direction])
next_room = requests.post(f"{url}/api/adv/{method}/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
# change current room and update cooldown
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world != 'dark':
# Code for looting any items in the room if the space is available
if len(next_room['items']) > 0 and self.encumbrance < self.strength:
for item in next_room['items']:
time.sleep(next_room['cooldown'])
self.pick_up_loot(item)
else:
if 'golden snitch' in next_room['items']:
self.pick_up_loot('golden snitch')
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
# add to graph and map, in addition to making graph connections
if str(next_id) not in self.graph:
print(f"New room! # {next_id}")
self.graph[str(next_id)] = {
e: '?' for e in next_room['exits']}
# make graph connections and update graph
self.graph[str(curr_id)][direction] = next_id
self.graph[str(next_id)][opposite[direction]] = curr_id
self._write_file('graph.txt', self.graph)
# update map with room info
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
for message in next_room['messages']:
print(f"{message}")
print(f"Now the player is in {self.current_room['room_id']}")
print(f"Cooldown before next action: {self.cooldown} seconds")
if len(self.graph) < 500:
print(
f"Total number of rooms explored so far: {len(self.graph)}")
print("======================================\n")
def get_coin(self):
time.sleep(self.cooldown)
data = mine()
self.cooldown = data['cooldown']
if len(data['errors']) > 0:
self.get_coin()
def pick_up_loot(self, item):
print(f"Looting {item}")
json = {"name": item}
if self.encumbrance < self.strength:
time.sleep(self.cooldown)
req = requests.post(f"{url}/api/adv/take/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
self.cooldown = req['cooldown']
time.sleep(self.cooldown)
self.check_self("item pick up") if self.world == 'light' else print(' Success!\n '+req['messages'][0] if len(req['messages']) > 0 else print(
" Oh NO!\n just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!"))
else:
if "carry" in self.abilities:
if len(self.status) != 0:
print(
"It seems your Bag is full and Glasowyn is already carring something!")
else:
req = requests.post(f"{url}/api/adv/carry/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
self.cooldown = req['cooldown']
print(req)
else:
print("Your Bag is full!")
def drop_loot(self, item):
time.sleep(self.cooldown)
json = {"name": item}
req = requests.post(f"{url}/api/adv/drop/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
time.sleep(req['cooldown'])
self.check_self()
def buy_name(self, name):
time.sleep(self.cooldown)
json = {"name": name}
req = requests.post(f"{url}/api/adv/change_name/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
print(req)
time.sleep(req['cooldown'])
json['confirm'] = "aye"
r1_conf = requests.post(f"{url}/api/adv/change_name/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
print(r1_conf)
time.sleep(r1_conf['cooldown'])
self.check_self()
def examine(self, item):
time.sleep(self.cooldown)
json = {"name": item}
req = requests.post(f"{url}/api/adv/examine/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
self.cooldown = req['cooldown']
if item == "WELL": # Examining well gives binary code to be deciphered for next coin location
if os.path.exists("hint.txt"):
os.remove("hint.txt")
desc = req['description']
instructions = desc.split('\n')
for line in instructions[2:]:
with open("hint.txt", "a") as f:
f.write(f"{line}\n")
cpu = CPU()
cpu.load('hint.txt')
cpu.run()
# clean up after itself and remove the hint file after used (new one will be made for future hints anyway)
if os.path.exists("hint.txt"):
os.remove("hint.txt")
# full message for light is "Mine your coin in room ###"
# but message for dark well is "Find your snitch in room ###"
limiter = 23 if self.world == 'light' else 24
return cpu.hint[limiter:]
else:
print(req['description'])
def pray(self):
time.sleep(self.cooldown)
req = requests.post(f"{url}/api/adv/pray/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}).json()
print(req)
time.sleep(req['cooldown'])
self.check_self()
def wear(self, item):
time.sleep(self.cooldown)
json = {"name": item}
req = requests.post(f"{url}/api/adv/wear/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
self.cooldown = req['cooldown']
time.sleep(self.cooldown)
self.check_self()
def check_balance(self):
time.sleep(self.cooldown)
req = requests.get(f"{url}/api/bc/get_balance/", headers={
'Authorization': f"Token {key}"}).json()
self.coins = float(req['messages'][0].split(' ')[5])
self.cooldown = req['cooldown']
print(f"\n{req['messages'][0]}\n")
def transform_coin(self, item):
time.sleep(self.cooldown)
self.check_balance()
json = {"name": item}
if self.coins > 0 and item in self.inventory:
time.sleep(self.cooldown)
req = requests.post(f"{url}/api/adv/transmogrify/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
print(req)
self.cooldown = req['cooldown']
for item in req['items']:
self.pick_up_loot(item)
def warp(self):
if "warp" in self.abilities:
time.sleep(self.cooldown)
req = requests.post(f"{url}/api/adv/warp/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}).json()
print(req['messages'][0])
self.cooldown = req['cooldown']
if self.world == 'light':
self.world = 'dark'
else:
self.world = 'light'
self.current_room = req
time.sleep(self.cooldown)
self.check_self()
if req['room_id'] not in self.graph:
# Just warped to a previously unknown room, add it to graph and map
g = self.graph
g[req['room_id']] = {d: '?' for d in req['exits']}
self._write_file('graph.txt', g)
m = self.map
m[req['room_id']] = req
self._write_file('map.txt', m)
else:
print("You do not have the warp ability yet!")
|
normal
|
{
"blob_id": "edd70f55e76418911d304d6eb41a6d2a93005a58",
"index": 890,
"step-1": "<mask token>\n\n\nclass Player:\n\n def __init__(self):\n data = self._get_status()\n time.sleep(data['cooldown'])\n self.name = data['name']\n self.cooldown = data['cooldown']\n self.encumbrance = data['encumbrance']\n self.strength = data['strength']\n self.speed = data['speed']\n self.gold = data['gold']\n self.bodywear = data['bodywear']\n self.footwear = data['footwear']\n self.inventory = data['inventory']\n self.abilities = data['abilities']\n self.status = data['status']\n self.has_mined = data['has_mined']\n self.errors = data['errors']\n self.messages = data['messages']\n self.snitches = data['snitches'] if data['snitches'] else 0\n self.current_room = self.check_room()\n self.world = 'dark' if self.current_room['room_id'] > 499 else 'light'\n self.map = self._read_file('map.txt')\n self.graph = self._read_file('graph.txt')\n <mask token>\n\n def _read_file(self, filepath):\n if self.world == 'dark':\n filepath = 'dark_' + filepath\n if not os.path.exists(filepath):\n f = open(filepath, 'w+')\n room = self.current_room\n if 'graph' in filepath:\n room = {room['room_id']: {d: '?' for d in room['exits']}}\n self._write_file(filepath, {self.current_room['room_id']: room})\n with open(filepath, 'r') as f:\n data = json.load(f)\n return data\n <mask token>\n <mask token>\n <mask token>\n\n def dash(self, direction, num_rooms, room_ids):\n if 'dash' not in self.abilities:\n print(\"Error! You can't dash yet!\")\n return\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print('\\n======================================')\n print(f'Dashing {direction} from room {curr_id}...')\n json = {'direction': direction, 'num_rooms': num_rooms,\n 'next_room_ids': room_ids}\n r = requests.post(f'{url}/api/adv/dash/', headers={'Authorization':\n f'Token {key}', 'Content-Type': 'application/json'}, json=json)\n next_room = r.json()\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n if self.world == 'dark' and 'golden snitch' in next_room['items']:\n try:\n self.pick_up_loot('golden snitch')\n except:\n print('Somebody already got that snitch!')\n elif self.world == 'light' and len(next_room['items']):\n for item in next_room['items']:\n self.pick_up_loot(item)\n for message in next_room['messages']:\n print(f'{message}')\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f'Cooldown before next action: {self.cooldown} seconds')\n print('======================================\\n')\n\n def travel(self, direction, method='move'):\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print('\\n======================================')\n if 'fly' in self.abilities and self.map[str(curr_id)]['terrain'] in [\n 'MOUNTAIN', 'NORMAL']:\n method = 'fly'\n print(f'Flying {direction} from room {curr_id}...')\n else:\n print(f'Walking {direction} from room {curr_id}...')\n if direction not in self.graph[str(curr_id)]:\n print('Error! 
Not a valid direction from the current room')\n else:\n json = {'direction': direction}\n if self.graph[str(curr_id)][direction] != '?':\n json['next_room_id'] = str(self.graph[str(curr_id)][direction])\n next_room = requests.post(f'{url}/api/adv/{method}/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n if self.world != 'dark':\n if len(next_room['items']\n ) > 0 and self.encumbrance < self.strength:\n for item in next_room['items']:\n time.sleep(next_room['cooldown'])\n self.pick_up_loot(item)\n elif 'golden snitch' in next_room['items']:\n self.pick_up_loot('golden snitch')\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n if str(next_id) not in self.graph:\n print(f'New room! # {next_id}')\n self.graph[str(next_id)] = {e: '?' for e in next_room['exits']}\n self.graph[str(curr_id)][direction] = next_id\n self.graph[str(next_id)][opposite[direction]] = curr_id\n self._write_file('graph.txt', self.graph)\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n for message in next_room['messages']:\n print(f'{message}')\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f'Cooldown before next action: {self.cooldown} seconds')\n if len(self.graph) < 500:\n print(\n f'Total number of rooms explored so far: {len(self.graph)}'\n )\n print('======================================\\n')\n\n def get_coin(self):\n time.sleep(self.cooldown)\n data = mine()\n self.cooldown = data['cooldown']\n if len(data['errors']) > 0:\n self.get_coin()\n <mask token>\n <mask token>\n <mask token>\n\n def examine(self, item):\n time.sleep(self.cooldown)\n json = {'name': item}\n req = requests.post(f'{url}/api/adv/examine/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n if item == 'WELL':\n if os.path.exists('hint.txt'):\n os.remove('hint.txt')\n desc = req['description']\n instructions = desc.split('\\n')\n for line in instructions[2:]:\n with open('hint.txt', 'a') as f:\n f.write(f'{line}\\n')\n cpu = CPU()\n cpu.load('hint.txt')\n cpu.run()\n if os.path.exists('hint.txt'):\n os.remove('hint.txt')\n limiter = 23 if self.world == 'light' else 24\n return cpu.hint[limiter:]\n else:\n print(req['description'])\n\n def pray(self):\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/pray/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}).json()\n print(req)\n time.sleep(req['cooldown'])\n self.check_self()\n <mask token>\n\n def check_balance(self):\n time.sleep(self.cooldown)\n req = requests.get(f'{url}/api/bc/get_balance/', headers={\n 'Authorization': f'Token {key}'}).json()\n self.coins = float(req['messages'][0].split(' ')[5])\n self.cooldown = req['cooldown']\n print(f\"\\n{req['messages'][0]}\\n\")\n\n def transform_coin(self, item):\n time.sleep(self.cooldown)\n self.check_balance()\n json = {'name': item}\n if self.coins > 0 and item in self.inventory:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/transmogrify/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(req)\n self.cooldown = req['cooldown']\n for item in req['items']:\n self.pick_up_loot(item)\n\n def warp(self):\n if 'warp' in self.abilities:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/warp/', 
headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}).json()\n print(req['messages'][0])\n self.cooldown = req['cooldown']\n if self.world == 'light':\n self.world = 'dark'\n else:\n self.world = 'light'\n self.current_room = req\n time.sleep(self.cooldown)\n self.check_self()\n if req['room_id'] not in self.graph:\n g = self.graph\n g[req['room_id']] = {d: '?' for d in req['exits']}\n self._write_file('graph.txt', g)\n m = self.map\n m[req['room_id']] = req\n self._write_file('map.txt', m)\n else:\n print('You do not have the warp ability yet!')\n",
"step-2": "<mask token>\n\n\nclass Player:\n\n def __init__(self):\n data = self._get_status()\n time.sleep(data['cooldown'])\n self.name = data['name']\n self.cooldown = data['cooldown']\n self.encumbrance = data['encumbrance']\n self.strength = data['strength']\n self.speed = data['speed']\n self.gold = data['gold']\n self.bodywear = data['bodywear']\n self.footwear = data['footwear']\n self.inventory = data['inventory']\n self.abilities = data['abilities']\n self.status = data['status']\n self.has_mined = data['has_mined']\n self.errors = data['errors']\n self.messages = data['messages']\n self.snitches = data['snitches'] if data['snitches'] else 0\n self.current_room = self.check_room()\n self.world = 'dark' if self.current_room['room_id'] > 499 else 'light'\n self.map = self._read_file('map.txt')\n self.graph = self._read_file('graph.txt')\n\n def _get_status(self):\n r = requests.post(f'{url}/api/adv/status/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'})\n return r.json()\n\n def _read_file(self, filepath):\n if self.world == 'dark':\n filepath = 'dark_' + filepath\n if not os.path.exists(filepath):\n f = open(filepath, 'w+')\n room = self.current_room\n if 'graph' in filepath:\n room = {room['room_id']: {d: '?' for d in room['exits']}}\n self._write_file(filepath, {self.current_room['room_id']: room})\n with open(filepath, 'r') as f:\n data = json.load(f)\n return data\n\n def _write_file(self, filepath, data):\n if self.world == 'dark' and 'dark' not in filepath:\n filepath = 'dark_' + filepath\n with open(filepath, 'w+') as outfile:\n json.dump(data, outfile)\n <mask token>\n <mask token>\n\n def dash(self, direction, num_rooms, room_ids):\n if 'dash' not in self.abilities:\n print(\"Error! You can't dash yet!\")\n return\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print('\\n======================================')\n print(f'Dashing {direction} from room {curr_id}...')\n json = {'direction': direction, 'num_rooms': num_rooms,\n 'next_room_ids': room_ids}\n r = requests.post(f'{url}/api/adv/dash/', headers={'Authorization':\n f'Token {key}', 'Content-Type': 'application/json'}, json=json)\n next_room = r.json()\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n if self.world == 'dark' and 'golden snitch' in next_room['items']:\n try:\n self.pick_up_loot('golden snitch')\n except:\n print('Somebody already got that snitch!')\n elif self.world == 'light' and len(next_room['items']):\n for item in next_room['items']:\n self.pick_up_loot(item)\n for message in next_room['messages']:\n print(f'{message}')\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f'Cooldown before next action: {self.cooldown} seconds')\n print('======================================\\n')\n\n def travel(self, direction, method='move'):\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print('\\n======================================')\n if 'fly' in self.abilities and self.map[str(curr_id)]['terrain'] in [\n 'MOUNTAIN', 'NORMAL']:\n method = 'fly'\n print(f'Flying {direction} from room {curr_id}...')\n else:\n print(f'Walking {direction} from room {curr_id}...')\n if direction not in self.graph[str(curr_id)]:\n print('Error! 
Not a valid direction from the current room')\n else:\n json = {'direction': direction}\n if self.graph[str(curr_id)][direction] != '?':\n json['next_room_id'] = str(self.graph[str(curr_id)][direction])\n next_room = requests.post(f'{url}/api/adv/{method}/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n if self.world != 'dark':\n if len(next_room['items']\n ) > 0 and self.encumbrance < self.strength:\n for item in next_room['items']:\n time.sleep(next_room['cooldown'])\n self.pick_up_loot(item)\n elif 'golden snitch' in next_room['items']:\n self.pick_up_loot('golden snitch')\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n if str(next_id) not in self.graph:\n print(f'New room! # {next_id}')\n self.graph[str(next_id)] = {e: '?' for e in next_room['exits']}\n self.graph[str(curr_id)][direction] = next_id\n self.graph[str(next_id)][opposite[direction]] = curr_id\n self._write_file('graph.txt', self.graph)\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n for message in next_room['messages']:\n print(f'{message}')\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f'Cooldown before next action: {self.cooldown} seconds')\n if len(self.graph) < 500:\n print(\n f'Total number of rooms explored so far: {len(self.graph)}'\n )\n print('======================================\\n')\n\n def get_coin(self):\n time.sleep(self.cooldown)\n data = mine()\n self.cooldown = data['cooldown']\n if len(data['errors']) > 0:\n self.get_coin()\n\n def pick_up_loot(self, item):\n print(f'Looting {item}')\n json = {'name': item}\n if self.encumbrance < self.strength:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/take/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n time.sleep(self.cooldown)\n self.check_self('item pick up'\n ) if self.world == 'light' else print(' Success!\\n ' +\n req['messages'][0] if len(req['messages']) > 0 else print(\n \"\"\" Oh NO!\n just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!\"\"\"\n ))\n elif 'carry' in self.abilities:\n if len(self.status) != 0:\n print(\n 'It seems your Bag is full and Glasowyn is already carring something!'\n )\n else:\n req = requests.post(f'{url}/api/adv/carry/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n print(req)\n else:\n print('Your Bag is full!')\n <mask token>\n\n def buy_name(self, name):\n time.sleep(self.cooldown)\n json = {'name': name}\n req = requests.post(f'{url}/api/adv/change_name/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(req)\n time.sleep(req['cooldown'])\n json['confirm'] = 'aye'\n r1_conf = requests.post(f'{url}/api/adv/change_name/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(r1_conf)\n time.sleep(r1_conf['cooldown'])\n self.check_self()\n\n def examine(self, item):\n time.sleep(self.cooldown)\n json = {'name': item}\n req = requests.post(f'{url}/api/adv/examine/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n if item == 'WELL':\n if 
os.path.exists('hint.txt'):\n os.remove('hint.txt')\n desc = req['description']\n instructions = desc.split('\\n')\n for line in instructions[2:]:\n with open('hint.txt', 'a') as f:\n f.write(f'{line}\\n')\n cpu = CPU()\n cpu.load('hint.txt')\n cpu.run()\n if os.path.exists('hint.txt'):\n os.remove('hint.txt')\n limiter = 23 if self.world == 'light' else 24\n return cpu.hint[limiter:]\n else:\n print(req['description'])\n\n def pray(self):\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/pray/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}).json()\n print(req)\n time.sleep(req['cooldown'])\n self.check_self()\n <mask token>\n\n def check_balance(self):\n time.sleep(self.cooldown)\n req = requests.get(f'{url}/api/bc/get_balance/', headers={\n 'Authorization': f'Token {key}'}).json()\n self.coins = float(req['messages'][0].split(' ')[5])\n self.cooldown = req['cooldown']\n print(f\"\\n{req['messages'][0]}\\n\")\n\n def transform_coin(self, item):\n time.sleep(self.cooldown)\n self.check_balance()\n json = {'name': item}\n if self.coins > 0 and item in self.inventory:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/transmogrify/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(req)\n self.cooldown = req['cooldown']\n for item in req['items']:\n self.pick_up_loot(item)\n\n def warp(self):\n if 'warp' in self.abilities:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/warp/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}).json()\n print(req['messages'][0])\n self.cooldown = req['cooldown']\n if self.world == 'light':\n self.world = 'dark'\n else:\n self.world = 'light'\n self.current_room = req\n time.sleep(self.cooldown)\n self.check_self()\n if req['room_id'] not in self.graph:\n g = self.graph\n g[req['room_id']] = {d: '?' for d in req['exits']}\n self._write_file('graph.txt', g)\n m = self.map\n m[req['room_id']] = req\n self._write_file('map.txt', m)\n else:\n print('You do not have the warp ability yet!')\n",
"step-3": "<mask token>\n\n\nclass Player:\n\n def __init__(self):\n data = self._get_status()\n time.sleep(data['cooldown'])\n self.name = data['name']\n self.cooldown = data['cooldown']\n self.encumbrance = data['encumbrance']\n self.strength = data['strength']\n self.speed = data['speed']\n self.gold = data['gold']\n self.bodywear = data['bodywear']\n self.footwear = data['footwear']\n self.inventory = data['inventory']\n self.abilities = data['abilities']\n self.status = data['status']\n self.has_mined = data['has_mined']\n self.errors = data['errors']\n self.messages = data['messages']\n self.snitches = data['snitches'] if data['snitches'] else 0\n self.current_room = self.check_room()\n self.world = 'dark' if self.current_room['room_id'] > 499 else 'light'\n self.map = self._read_file('map.txt')\n self.graph = self._read_file('graph.txt')\n\n def _get_status(self):\n r = requests.post(f'{url}/api/adv/status/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'})\n return r.json()\n\n def _read_file(self, filepath):\n if self.world == 'dark':\n filepath = 'dark_' + filepath\n if not os.path.exists(filepath):\n f = open(filepath, 'w+')\n room = self.current_room\n if 'graph' in filepath:\n room = {room['room_id']: {d: '?' for d in room['exits']}}\n self._write_file(filepath, {self.current_room['room_id']: room})\n with open(filepath, 'r') as f:\n data = json.load(f)\n return data\n\n def _write_file(self, filepath, data):\n if self.world == 'dark' and 'dark' not in filepath:\n filepath = 'dark_' + filepath\n with open(filepath, 'w+') as outfile:\n json.dump(data, outfile)\n\n def check_room(self):\n r = requests.get(f'{url}/api/adv/init/', headers={'Authorization':\n f'Token {key}'})\n data = r.json()\n if 'players' in data:\n del data['players']\n return data\n\n def check_self(self, cause=None):\n data = self._get_status()\n cleaned = {**data}\n cleaned['status'].append(\n \"Glasowyn's hands stand Empty and Effervescent, see them filled.\"\n ) if len(cleaned['status']) < 1 else None\n cleaned['world'] = self.world\n cut = ['has_mined', 'errors']\n for k in cut:\n del cleaned[k]\n if cause == 'item pick up':\n ret = f\"\"\" You are now held down by the weight of {cleaned['encumbrance']} Stones.\n Your Experience and equipment Grant you the ability to\n carry {cleaned['strength']} stones before you need to take longer rests.\n Your bag now carries {cleaned['inventory']}\"\"\"\n print(ret +\n f\"\"\"\n Your ghost seems to have the space to carry an additional item if you would like\"\"\"\n if 'carry' in cleaned['abilities'] and len(cleaned[\n 'status']) else ret)\n else:\n print('\\n' + '*' * 22 + ' ' + 'Your Current State' + ' ' + '*' * 22\n )\n for item in cleaned.items():\n print(f'{item[0]}: {item[1]}')\n print('*' * 64 + '\\n')\n self.name = data['name']\n self.cooldown = data['cooldown']\n self.encumbrance = data['encumbrance']\n self.strength = data['strength']\n self.speed = data['speed']\n self.gold = data['gold']\n self.bodywear = data['bodywear']\n self.footwear = data['footwear']\n self.inventory = data['inventory']\n self.abilities = data['abilities']\n self.status = data['status']\n self.has_mined = data['has_mined']\n self.errors = data['errors']\n self.messages = data['messages']\n self.snitches = data['snitches'] if data['snitches'] else 0\n self.map = self._read_file('map.txt')\n self.graph = self._read_file('graph.txt')\n\n def dash(self, direction, num_rooms, room_ids):\n if 'dash' not in self.abilities:\n print(\"Error! 
You can't dash yet!\")\n return\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print('\\n======================================')\n print(f'Dashing {direction} from room {curr_id}...')\n json = {'direction': direction, 'num_rooms': num_rooms,\n 'next_room_ids': room_ids}\n r = requests.post(f'{url}/api/adv/dash/', headers={'Authorization':\n f'Token {key}', 'Content-Type': 'application/json'}, json=json)\n next_room = r.json()\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n if self.world == 'dark' and 'golden snitch' in next_room['items']:\n try:\n self.pick_up_loot('golden snitch')\n except:\n print('Somebody already got that snitch!')\n elif self.world == 'light' and len(next_room['items']):\n for item in next_room['items']:\n self.pick_up_loot(item)\n for message in next_room['messages']:\n print(f'{message}')\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f'Cooldown before next action: {self.cooldown} seconds')\n print('======================================\\n')\n\n def travel(self, direction, method='move'):\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print('\\n======================================')\n if 'fly' in self.abilities and self.map[str(curr_id)]['terrain'] in [\n 'MOUNTAIN', 'NORMAL']:\n method = 'fly'\n print(f'Flying {direction} from room {curr_id}...')\n else:\n print(f'Walking {direction} from room {curr_id}...')\n if direction not in self.graph[str(curr_id)]:\n print('Error! Not a valid direction from the current room')\n else:\n json = {'direction': direction}\n if self.graph[str(curr_id)][direction] != '?':\n json['next_room_id'] = str(self.graph[str(curr_id)][direction])\n next_room = requests.post(f'{url}/api/adv/{method}/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n if self.world != 'dark':\n if len(next_room['items']\n ) > 0 and self.encumbrance < self.strength:\n for item in next_room['items']:\n time.sleep(next_room['cooldown'])\n self.pick_up_loot(item)\n elif 'golden snitch' in next_room['items']:\n self.pick_up_loot('golden snitch')\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n if str(next_id) not in self.graph:\n print(f'New room! # {next_id}')\n self.graph[str(next_id)] = {e: '?' 
for e in next_room['exits']}\n self.graph[str(curr_id)][direction] = next_id\n self.graph[str(next_id)][opposite[direction]] = curr_id\n self._write_file('graph.txt', self.graph)\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n for message in next_room['messages']:\n print(f'{message}')\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f'Cooldown before next action: {self.cooldown} seconds')\n if len(self.graph) < 500:\n print(\n f'Total number of rooms explored so far: {len(self.graph)}'\n )\n print('======================================\\n')\n\n def get_coin(self):\n time.sleep(self.cooldown)\n data = mine()\n self.cooldown = data['cooldown']\n if len(data['errors']) > 0:\n self.get_coin()\n\n def pick_up_loot(self, item):\n print(f'Looting {item}')\n json = {'name': item}\n if self.encumbrance < self.strength:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/take/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n time.sleep(self.cooldown)\n self.check_self('item pick up'\n ) if self.world == 'light' else print(' Success!\\n ' +\n req['messages'][0] if len(req['messages']) > 0 else print(\n \"\"\" Oh NO!\n just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!\"\"\"\n ))\n elif 'carry' in self.abilities:\n if len(self.status) != 0:\n print(\n 'It seems your Bag is full and Glasowyn is already carring something!'\n )\n else:\n req = requests.post(f'{url}/api/adv/carry/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n print(req)\n else:\n print('Your Bag is full!')\n <mask token>\n\n def buy_name(self, name):\n time.sleep(self.cooldown)\n json = {'name': name}\n req = requests.post(f'{url}/api/adv/change_name/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(req)\n time.sleep(req['cooldown'])\n json['confirm'] = 'aye'\n r1_conf = requests.post(f'{url}/api/adv/change_name/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(r1_conf)\n time.sleep(r1_conf['cooldown'])\n self.check_self()\n\n def examine(self, item):\n time.sleep(self.cooldown)\n json = {'name': item}\n req = requests.post(f'{url}/api/adv/examine/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n if item == 'WELL':\n if os.path.exists('hint.txt'):\n os.remove('hint.txt')\n desc = req['description']\n instructions = desc.split('\\n')\n for line in instructions[2:]:\n with open('hint.txt', 'a') as f:\n f.write(f'{line}\\n')\n cpu = CPU()\n cpu.load('hint.txt')\n cpu.run()\n if os.path.exists('hint.txt'):\n os.remove('hint.txt')\n limiter = 23 if self.world == 'light' else 24\n return cpu.hint[limiter:]\n else:\n print(req['description'])\n\n def pray(self):\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/pray/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}).json()\n print(req)\n time.sleep(req['cooldown'])\n self.check_self()\n <mask token>\n\n def check_balance(self):\n time.sleep(self.cooldown)\n req = requests.get(f'{url}/api/bc/get_balance/', headers={\n 'Authorization': f'Token {key}'}).json()\n self.coins = float(req['messages'][0].split(' ')[5])\n self.cooldown = 
req['cooldown']\n print(f\"\\n{req['messages'][0]}\\n\")\n\n def transform_coin(self, item):\n time.sleep(self.cooldown)\n self.check_balance()\n json = {'name': item}\n if self.coins > 0 and item in self.inventory:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/transmogrify/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(req)\n self.cooldown = req['cooldown']\n for item in req['items']:\n self.pick_up_loot(item)\n\n def warp(self):\n if 'warp' in self.abilities:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/warp/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}).json()\n print(req['messages'][0])\n self.cooldown = req['cooldown']\n if self.world == 'light':\n self.world = 'dark'\n else:\n self.world = 'light'\n self.current_room = req\n time.sleep(self.cooldown)\n self.check_self()\n if req['room_id'] not in self.graph:\n g = self.graph\n g[req['room_id']] = {d: '?' for d in req['exits']}\n self._write_file('graph.txt', g)\n m = self.map\n m[req['room_id']] = req\n self._write_file('map.txt', m)\n else:\n print('You do not have the warp ability yet!')\n",
"step-4": "<mask token>\n\n\nclass Player:\n\n def __init__(self):\n data = self._get_status()\n time.sleep(data['cooldown'])\n self.name = data['name']\n self.cooldown = data['cooldown']\n self.encumbrance = data['encumbrance']\n self.strength = data['strength']\n self.speed = data['speed']\n self.gold = data['gold']\n self.bodywear = data['bodywear']\n self.footwear = data['footwear']\n self.inventory = data['inventory']\n self.abilities = data['abilities']\n self.status = data['status']\n self.has_mined = data['has_mined']\n self.errors = data['errors']\n self.messages = data['messages']\n self.snitches = data['snitches'] if data['snitches'] else 0\n self.current_room = self.check_room()\n self.world = 'dark' if self.current_room['room_id'] > 499 else 'light'\n self.map = self._read_file('map.txt')\n self.graph = self._read_file('graph.txt')\n\n def _get_status(self):\n r = requests.post(f'{url}/api/adv/status/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'})\n return r.json()\n\n def _read_file(self, filepath):\n if self.world == 'dark':\n filepath = 'dark_' + filepath\n if not os.path.exists(filepath):\n f = open(filepath, 'w+')\n room = self.current_room\n if 'graph' in filepath:\n room = {room['room_id']: {d: '?' for d in room['exits']}}\n self._write_file(filepath, {self.current_room['room_id']: room})\n with open(filepath, 'r') as f:\n data = json.load(f)\n return data\n\n def _write_file(self, filepath, data):\n if self.world == 'dark' and 'dark' not in filepath:\n filepath = 'dark_' + filepath\n with open(filepath, 'w+') as outfile:\n json.dump(data, outfile)\n\n def check_room(self):\n r = requests.get(f'{url}/api/adv/init/', headers={'Authorization':\n f'Token {key}'})\n data = r.json()\n if 'players' in data:\n del data['players']\n return data\n\n def check_self(self, cause=None):\n data = self._get_status()\n cleaned = {**data}\n cleaned['status'].append(\n \"Glasowyn's hands stand Empty and Effervescent, see them filled.\"\n ) if len(cleaned['status']) < 1 else None\n cleaned['world'] = self.world\n cut = ['has_mined', 'errors']\n for k in cut:\n del cleaned[k]\n if cause == 'item pick up':\n ret = f\"\"\" You are now held down by the weight of {cleaned['encumbrance']} Stones.\n Your Experience and equipment Grant you the ability to\n carry {cleaned['strength']} stones before you need to take longer rests.\n Your bag now carries {cleaned['inventory']}\"\"\"\n print(ret +\n f\"\"\"\n Your ghost seems to have the space to carry an additional item if you would like\"\"\"\n if 'carry' in cleaned['abilities'] and len(cleaned[\n 'status']) else ret)\n else:\n print('\\n' + '*' * 22 + ' ' + 'Your Current State' + ' ' + '*' * 22\n )\n for item in cleaned.items():\n print(f'{item[0]}: {item[1]}')\n print('*' * 64 + '\\n')\n self.name = data['name']\n self.cooldown = data['cooldown']\n self.encumbrance = data['encumbrance']\n self.strength = data['strength']\n self.speed = data['speed']\n self.gold = data['gold']\n self.bodywear = data['bodywear']\n self.footwear = data['footwear']\n self.inventory = data['inventory']\n self.abilities = data['abilities']\n self.status = data['status']\n self.has_mined = data['has_mined']\n self.errors = data['errors']\n self.messages = data['messages']\n self.snitches = data['snitches'] if data['snitches'] else 0\n self.map = self._read_file('map.txt')\n self.graph = self._read_file('graph.txt')\n\n def dash(self, direction, num_rooms, room_ids):\n if 'dash' not in self.abilities:\n print(\"Error! 
You can't dash yet!\")\n return\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print('\\n======================================')\n print(f'Dashing {direction} from room {curr_id}...')\n json = {'direction': direction, 'num_rooms': num_rooms,\n 'next_room_ids': room_ids}\n r = requests.post(f'{url}/api/adv/dash/', headers={'Authorization':\n f'Token {key}', 'Content-Type': 'application/json'}, json=json)\n next_room = r.json()\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n if self.world == 'dark' and 'golden snitch' in next_room['items']:\n try:\n self.pick_up_loot('golden snitch')\n except:\n print('Somebody already got that snitch!')\n elif self.world == 'light' and len(next_room['items']):\n for item in next_room['items']:\n self.pick_up_loot(item)\n for message in next_room['messages']:\n print(f'{message}')\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f'Cooldown before next action: {self.cooldown} seconds')\n print('======================================\\n')\n\n def travel(self, direction, method='move'):\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print('\\n======================================')\n if 'fly' in self.abilities and self.map[str(curr_id)]['terrain'] in [\n 'MOUNTAIN', 'NORMAL']:\n method = 'fly'\n print(f'Flying {direction} from room {curr_id}...')\n else:\n print(f'Walking {direction} from room {curr_id}...')\n if direction not in self.graph[str(curr_id)]:\n print('Error! Not a valid direction from the current room')\n else:\n json = {'direction': direction}\n if self.graph[str(curr_id)][direction] != '?':\n json['next_room_id'] = str(self.graph[str(curr_id)][direction])\n next_room = requests.post(f'{url}/api/adv/{method}/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n if self.world != 'dark':\n if len(next_room['items']\n ) > 0 and self.encumbrance < self.strength:\n for item in next_room['items']:\n time.sleep(next_room['cooldown'])\n self.pick_up_loot(item)\n elif 'golden snitch' in next_room['items']:\n self.pick_up_loot('golden snitch')\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n if str(next_id) not in self.graph:\n print(f'New room! # {next_id}')\n self.graph[str(next_id)] = {e: '?' 
for e in next_room['exits']}\n self.graph[str(curr_id)][direction] = next_id\n self.graph[str(next_id)][opposite[direction]] = curr_id\n self._write_file('graph.txt', self.graph)\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n for message in next_room['messages']:\n print(f'{message}')\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f'Cooldown before next action: {self.cooldown} seconds')\n if len(self.graph) < 500:\n print(\n f'Total number of rooms explored so far: {len(self.graph)}'\n )\n print('======================================\\n')\n\n def get_coin(self):\n time.sleep(self.cooldown)\n data = mine()\n self.cooldown = data['cooldown']\n if len(data['errors']) > 0:\n self.get_coin()\n\n def pick_up_loot(self, item):\n print(f'Looting {item}')\n json = {'name': item}\n if self.encumbrance < self.strength:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/take/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n time.sleep(self.cooldown)\n self.check_self('item pick up'\n ) if self.world == 'light' else print(' Success!\\n ' +\n req['messages'][0] if len(req['messages']) > 0 else print(\n \"\"\" Oh NO!\n just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!\"\"\"\n ))\n elif 'carry' in self.abilities:\n if len(self.status) != 0:\n print(\n 'It seems your Bag is full and Glasowyn is already carring something!'\n )\n else:\n req = requests.post(f'{url}/api/adv/carry/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n print(req)\n else:\n print('Your Bag is full!')\n\n def drop_loot(self, item):\n time.sleep(self.cooldown)\n json = {'name': item}\n req = requests.post(f'{url}/api/adv/drop/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n time.sleep(req['cooldown'])\n self.check_self()\n\n def buy_name(self, name):\n time.sleep(self.cooldown)\n json = {'name': name}\n req = requests.post(f'{url}/api/adv/change_name/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(req)\n time.sleep(req['cooldown'])\n json['confirm'] = 'aye'\n r1_conf = requests.post(f'{url}/api/adv/change_name/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(r1_conf)\n time.sleep(r1_conf['cooldown'])\n self.check_self()\n\n def examine(self, item):\n time.sleep(self.cooldown)\n json = {'name': item}\n req = requests.post(f'{url}/api/adv/examine/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n if item == 'WELL':\n if os.path.exists('hint.txt'):\n os.remove('hint.txt')\n desc = req['description']\n instructions = desc.split('\\n')\n for line in instructions[2:]:\n with open('hint.txt', 'a') as f:\n f.write(f'{line}\\n')\n cpu = CPU()\n cpu.load('hint.txt')\n cpu.run()\n if os.path.exists('hint.txt'):\n os.remove('hint.txt')\n limiter = 23 if self.world == 'light' else 24\n return cpu.hint[limiter:]\n else:\n print(req['description'])\n\n def pray(self):\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/pray/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}).json()\n print(req)\n time.sleep(req['cooldown'])\n 
self.check_self()\n\n def wear(self, item):\n time.sleep(self.cooldown)\n json = {'name': item}\n req = requests.post(f'{url}/api/adv/wear/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n self.cooldown = req['cooldown']\n time.sleep(self.cooldown)\n self.check_self()\n\n def check_balance(self):\n time.sleep(self.cooldown)\n req = requests.get(f'{url}/api/bc/get_balance/', headers={\n 'Authorization': f'Token {key}'}).json()\n self.coins = float(req['messages'][0].split(' ')[5])\n self.cooldown = req['cooldown']\n print(f\"\\n{req['messages'][0]}\\n\")\n\n def transform_coin(self, item):\n time.sleep(self.cooldown)\n self.check_balance()\n json = {'name': item}\n if self.coins > 0 and item in self.inventory:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/transmogrify/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}, json=json).json()\n print(req)\n self.cooldown = req['cooldown']\n for item in req['items']:\n self.pick_up_loot(item)\n\n def warp(self):\n if 'warp' in self.abilities:\n time.sleep(self.cooldown)\n req = requests.post(f'{url}/api/adv/warp/', headers={\n 'Authorization': f'Token {key}', 'Content-Type':\n 'application/json'}).json()\n print(req['messages'][0])\n self.cooldown = req['cooldown']\n if self.world == 'light':\n self.world = 'dark'\n else:\n self.world = 'light'\n self.current_room = req\n time.sleep(self.cooldown)\n self.check_self()\n if req['room_id'] not in self.graph:\n g = self.graph\n g[req['room_id']] = {d: '?' for d in req['exits']}\n self._write_file('graph.txt', g)\n m = self.map\n m[req['room_id']] = req\n self._write_file('map.txt', m)\n else:\n print('You do not have the warp ability yet!')\n",
"step-5": "from api import url, key, opposite\nimport requests\nimport json\nimport time\nimport os\nfrom miner import mine\nfrom cpu import *\n\n\nclass Player:\n def __init__(self):\n data = self._get_status()\n time.sleep(data['cooldown'])\n self.name = data['name']\n self.cooldown = data['cooldown']\n self.encumbrance = data['encumbrance']\n self.strength = data['strength']\n self.speed = data['speed']\n self.gold = data['gold']\n self.bodywear = data['bodywear']\n self.footwear = data['footwear']\n self.inventory = data['inventory']\n self.abilities = data['abilities']\n self.status = data['status']\n self.has_mined = data['has_mined']\n self.errors = data['errors']\n self.messages = data['messages']\n self.snitches = data['snitches'] if data['snitches'] else 0\n self.current_room = self.check_room()\n self.world = \"dark\" if self.current_room['room_id'] > 499 else \"light\"\n self.map = self._read_file('map.txt')\n self.graph = self._read_file('graph.txt')\n\n def _get_status(self):\n r = requests.post(f\"{url}/api/adv/status/\",\n headers={'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"})\n return r.json()\n\n def _read_file(self, filepath):\n if self.world == 'dark':\n filepath = 'dark_' + filepath\n if not os.path.exists(filepath):\n f = open(filepath, 'w+')\n room = self.current_room\n if 'graph' in filepath:\n room = {room['room_id']: {d: '?' for d in room['exits']}}\n\n self._write_file(filepath, {self.current_room['room_id']: room})\n\n with open(filepath, 'r') as f:\n data = json.load(f)\n return data\n\n def _write_file(self, filepath, data):\n if self.world == 'dark' and 'dark' not in filepath:\n filepath = 'dark_' + filepath\n with open(filepath, 'w+') as outfile:\n json.dump(data, outfile)\n\n def check_room(self):\n r = requests.get(f\"{url}/api/adv/init/\",\n headers={'Authorization': f\"Token {key}\"})\n data = r.json()\n if 'players' in data:\n del data['players']\n return data\n\n def check_self(self, cause=None):\n data = self._get_status()\n cleaned = {**data} # How cool is the spread operator!\n cleaned['status'].append(\"Glasowyn's hands stand Empty and Effervescent, see them filled.\") if len(\n cleaned['status']) < 1 else None\n cleaned[\"world\"] = self.world\n cut = ['has_mined', 'errors', ]\n for k in cut:\n del cleaned[k]\n if cause == \"item pick up\":\n ret = f\" You are now held down by the weight of {cleaned['encumbrance']} Stones.\\n Your Experience and equipment Grant you the ability to\\n carry {cleaned['strength']} stones before you need to take longer rests.\\n Your bag now carries {cleaned['inventory']}\"\n\n print(ret + f\"\\n Your ghost seems to have the space to carry an additional item if you would like\" if \"carry\" in cleaned['abilities'] and len(\n cleaned['status']) else ret)\n else:\n print('\\n'+\"*\"*22+' '+\"Your Current State\"+' '+\"*\"*22)\n for item in cleaned.items():\n print(f\"{item[0]}: {item[1]}\")\n print(\"*\"*64+'\\n')\n\n self.name = data['name']\n self.cooldown = data['cooldown']\n self.encumbrance = data['encumbrance']\n self.strength = data['strength']\n self.speed = data['speed']\n self.gold = data['gold']\n self.bodywear = data['bodywear']\n self.footwear = data['footwear']\n self.inventory = data['inventory']\n self.abilities = data['abilities']\n self.status = data['status']\n self.has_mined = data['has_mined']\n self.errors = data['errors']\n self.messages = data['messages']\n self.snitches = data['snitches'] if data['snitches'] else 0\n self.map = self._read_file('map.txt')\n self.graph 
= self._read_file('graph.txt')\n\n def dash(self, direction, num_rooms, room_ids):\n if \"dash\" not in self.abilities:\n print(\"Error! You can't dash yet!\")\n return\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n print(\"\\n======================================\")\n print(f\"Dashing {direction} from room {curr_id}...\")\n\n json = {\"direction\": direction,\n \"num_rooms\": num_rooms, \"next_room_ids\": room_ids}\n r = requests.post(f\"{url}/api/adv/dash/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json)\n next_room = r.json()\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n\n # update map with room info\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n\n # change current room and update cooldown\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n\n if self.world == 'dark' and 'golden snitch' in next_room['items']:\n try:\n self.pick_up_loot('golden snitch')\n except:\n print(\"Somebody already got that snitch!\")\n elif self.world == 'light' and len(next_room['items']):\n for item in next_room['items']:\n self.pick_up_loot(item)\n\n for message in next_room['messages']:\n print(f\"{message}\")\n\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f\"Cooldown before next action: {self.cooldown} seconds\")\n print(\"======================================\\n\")\n\n def travel(self, direction, method=\"move\"):\n time.sleep(self.cooldown)\n curr_id = self.current_room['room_id']\n\n print(\"\\n======================================\")\n if \"fly\" in self.abilities and self.map[str(curr_id)]['terrain'] in ['MOUNTAIN', 'NORMAL']:\n method = \"fly\"\n print(f\"Flying {direction} from room {curr_id}...\")\n else:\n print(f\"Walking {direction} from room {curr_id}...\")\n\n if direction not in self.graph[str(curr_id)]:\n print(\"Error! Not a valid direction from the current room\")\n else:\n json = {\"direction\": direction}\n if self.graph[str(curr_id)][direction] != \"?\":\n json['next_room_id'] = str(self.graph[str(curr_id)][direction])\n next_room = requests.post(f\"{url}/api/adv/{method}/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n\n # change current room and update cooldown\n self.current_room = next_room\n self.cooldown = self.current_room['cooldown']\n\n if self.world != 'dark':\n # Code for looting any items in the room if the space is available\n if len(next_room['items']) > 0 and self.encumbrance < self.strength:\n for item in next_room['items']:\n time.sleep(next_room['cooldown'])\n self.pick_up_loot(item)\n else:\n if 'golden snitch' in next_room['items']:\n self.pick_up_loot('golden snitch')\n\n if 'players' in next_room:\n del next_room['players']\n next_id = next_room['room_id']\n\n # add to graph and map, in addition to making graph connections\n if str(next_id) not in self.graph:\n print(f\"New room! # {next_id}\")\n self.graph[str(next_id)] = {\n e: '?' 
for e in next_room['exits']}\n\n # make graph connections and update graph\n self.graph[str(curr_id)][direction] = next_id\n self.graph[str(next_id)][opposite[direction]] = curr_id\n self._write_file('graph.txt', self.graph)\n\n # update map with room info\n self.map[next_id] = next_room\n self._write_file('map.txt', self.map)\n\n for message in next_room['messages']:\n print(f\"{message}\")\n\n print(f\"Now the player is in {self.current_room['room_id']}\")\n print(f\"Cooldown before next action: {self.cooldown} seconds\")\n if len(self.graph) < 500:\n print(\n f\"Total number of rooms explored so far: {len(self.graph)}\")\n print(\"======================================\\n\")\n\n def get_coin(self):\n time.sleep(self.cooldown)\n data = mine()\n self.cooldown = data['cooldown']\n if len(data['errors']) > 0:\n self.get_coin()\n\n def pick_up_loot(self, item):\n print(f\"Looting {item}\")\n json = {\"name\": item}\n if self.encumbrance < self.strength:\n time.sleep(self.cooldown)\n req = requests.post(f\"{url}/api/adv/take/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n self.cooldown = req['cooldown']\n time.sleep(self.cooldown)\n self.check_self(\"item pick up\") if self.world == 'light' else print(' Success!\\n '+req['messages'][0] if len(req['messages']) > 0 else print(\n \" Oh NO!\\n just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!\"))\n else:\n if \"carry\" in self.abilities:\n if len(self.status) != 0:\n print(\n \"It seems your Bag is full and Glasowyn is already carring something!\")\n else:\n req = requests.post(f\"{url}/api/adv/carry/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n self.cooldown = req['cooldown']\n print(req)\n else:\n print(\"Your Bag is full!\")\n\n def drop_loot(self, item):\n time.sleep(self.cooldown)\n json = {\"name\": item}\n req = requests.post(f\"{url}/api/adv/drop/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n time.sleep(req['cooldown'])\n self.check_self()\n\n def buy_name(self, name):\n time.sleep(self.cooldown)\n json = {\"name\": name}\n req = requests.post(f\"{url}/api/adv/change_name/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n print(req)\n\n time.sleep(req['cooldown'])\n\n json['confirm'] = \"aye\"\n r1_conf = requests.post(f\"{url}/api/adv/change_name/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n print(r1_conf)\n time.sleep(r1_conf['cooldown'])\n self.check_self()\n\n def examine(self, item):\n time.sleep(self.cooldown)\n json = {\"name\": item}\n req = requests.post(f\"{url}/api/adv/examine/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n self.cooldown = req['cooldown']\n\n if item == \"WELL\": # Examining well gives binary code to be deciphered for next coin location\n if os.path.exists(\"hint.txt\"):\n os.remove(\"hint.txt\")\n desc = req['description']\n instructions = desc.split('\\n')\n for line in instructions[2:]:\n with open(\"hint.txt\", \"a\") as f:\n f.write(f\"{line}\\n\")\n\n cpu = CPU()\n cpu.load('hint.txt')\n cpu.run()\n # clean up after itself and remove the hint file after used (new one will be made for future hints anyway)\n if os.path.exists(\"hint.txt\"):\n os.remove(\"hint.txt\")\n # full message 
for light is \"Mine your coin in room ###\"\n # but message for dark well is \"Find your snitch in room ###\"\n limiter = 23 if self.world == 'light' else 24\n return cpu.hint[limiter:]\n else:\n print(req['description'])\n\n def pray(self):\n time.sleep(self.cooldown)\n req = requests.post(f\"{url}/api/adv/pray/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}).json()\n print(req)\n time.sleep(req['cooldown'])\n self.check_self()\n\n def wear(self, item):\n time.sleep(self.cooldown)\n json = {\"name\": item}\n req = requests.post(f\"{url}/api/adv/wear/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n\n self.cooldown = req['cooldown']\n time.sleep(self.cooldown)\n self.check_self()\n\n def check_balance(self):\n time.sleep(self.cooldown)\n req = requests.get(f\"{url}/api/bc/get_balance/\", headers={\n 'Authorization': f\"Token {key}\"}).json()\n self.coins = float(req['messages'][0].split(' ')[5])\n self.cooldown = req['cooldown']\n print(f\"\\n{req['messages'][0]}\\n\")\n\n def transform_coin(self, item):\n time.sleep(self.cooldown)\n self.check_balance()\n json = {\"name\": item}\n if self.coins > 0 and item in self.inventory:\n time.sleep(self.cooldown)\n req = requests.post(f\"{url}/api/adv/transmogrify/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}, json=json).json()\n print(req)\n self.cooldown = req['cooldown']\n for item in req['items']:\n self.pick_up_loot(item)\n\n def warp(self):\n if \"warp\" in self.abilities:\n time.sleep(self.cooldown)\n req = requests.post(f\"{url}/api/adv/warp/\", headers={\n 'Authorization': f\"Token {key}\", \"Content-Type\": \"application/json\"}).json()\n print(req['messages'][0])\n self.cooldown = req['cooldown']\n if self.world == 'light':\n self.world = 'dark'\n else:\n self.world = 'light'\n self.current_room = req\n time.sleep(self.cooldown)\n self.check_self()\n\n if req['room_id'] not in self.graph:\n # Just warped to a previously unknown room, add it to graph and map\n g = self.graph\n g[req['room_id']] = {d: '?' for d in req['exits']}\n self._write_file('graph.txt', g)\n\n m = self.map\n m[req['room_id']] = req\n self._write_file('map.txt', m)\n else:\n print(\"You do not have the warp ability yet!\")\n",
"step-ids": [
11,
15,
17,
19,
21
]
}
|
[
11,
15,
17,
19,
21
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NodoLista:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NodoLista:
def __init__(self, cancion, s, a):
self.elemento = cancion
self.siguiente = s
self.anterior = a
<|reserved_special_token_1|>
from cancion import *
class NodoLista:
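    """Doubly-linked playlist node: holds a song plus `siguiente`/`anterior` links to the next and previous nodes."""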
def __init__(self, cancion, s, a):
self.elemento = cancion
self.siguiente = s
self.anterior = a
|
flexible
|
{
"blob_id": "1fb3904d48905ade8f83b6e052057e80302ec5a7",
"index": 4253,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NodoLista:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass NodoLista:\n\n def __init__(self, cancion, s, a):\n self.elemento = cancion\n self.siguiente = s\n self.anterior = a\n",
"step-4": "from cancion import *\n\n\nclass NodoLista:\n\n def __init__(self, cancion, s, a):\n self.elemento = cancion\n self.siguiente = s\n self.anterior = a\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import numpy as np
from argparse import ArgumentParser
from tqdm import tqdm
from models.networks import Perceptron
from data.perceptron_dataset import Dataset, batchify
from utils.utils import L1Loss, plot_line
from modules.perceptron_trainer import Trainer
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--name', type=str, default='test')
parser.add_argument('--input_dim', type=int, default=2)
parser.add_argument('--output_dim', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--lr', type=float, default=0.1)  # learning rate is fractional, so float rather than int
parser.add_argument('--checkpoints_dir', type=str, default='../saves')
args = parser.parse_args()
    # toy 2-D training set with +/-1 labels
    inputs = np.array([[1, 1], [-1, -1], [0, 0.5], [0.1, 0.5], [0.2, 0.2], [0.9, 0.5]])
    targets = np.array([1, -1, -1, -1, 1, 1])
    args.train_data = Dataset(inputs, targets)
args.val_data = None
args.mode = 'numpy'
trainer = Trainer(args)
for i, epoch in enumerate(range(1, args.epochs)):
result = trainer.run_epoch()
filename = os.path.join(trainer.save_dir, 'plot_%d.png'%(i+1))
plot_line(trainer.weights, filename)
print("Epochs: [%d]/[%d]"%(epoch, args.epochs))
error_count = result['error_count']
if error_count == 0:
print('No error')
print(trainer.weights)
break
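

# A minimal sketch of the classic perceptron update this script exercises.
# This is an assumption for illustration only: the real training step lives
# in modules.perceptron_trainer.Trainer, which is not shown here.
def perceptron_step(w, x, target, lr=0.1):
    """Nudge w toward x whenever sign(w . x) disagrees with the +/-1 target."""
    pred = 1 if float(np.dot(w, x)) >= 0 else -1
    if pred != target:
        w = w + lr * target * x
    return w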
|
normal
|
{
"blob_id": "726aaa0ef129f950e6da6701bb20e893d2f7373b",
"index": 3823,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--name', type=str, default='test')\n parser.add_argument('--input_dim', type=int, default=2)\n parser.add_argument('--output_dim', type=int, default=1)\n parser.add_argument('--batch_size', type=int, default=1)\n parser.add_argument('--epochs', type=int, default=5)\n parser.add_argument('--lr', type=int, default=0.1)\n parser.add_argument('--checkpoints_dir', type=str, default='../saves')\n args = parser.parse_args()\n input = np.array([[1, 1], [-1, -1], [0, 0.5], [0.1, 0.5], [0.2, 0.2], [\n 0.9, 0.5]])\n targets = np.array([1, -1, -1, -1, 1, 1])\n args.train_data = Dataset(input, targets)\n args.val_data = None\n args.mode = 'numpy'\n trainer = Trainer(args)\n for i, epoch in enumerate(range(1, args.epochs)):\n result = trainer.run_epoch()\n filename = os.path.join(trainer.save_dir, 'plot_%d.png' % (i + 1))\n plot_line(trainer.weights, filename)\n print('Epochs: [%d]/[%d]' % (epoch, args.epochs))\n error_count = result['error_count']\n if error_count == 0:\n print('No error')\n print(trainer.weights)\n break\n",
"step-3": "import os\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom tqdm import tqdm\nfrom models.networks import Perceptron\nfrom data.perceptron_dataset import Dataset, batchify\nfrom utils.utils import L1Loss, plot_line\nfrom modules.perceptron_trainer import Trainer\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--name', type=str, default='test')\n parser.add_argument('--input_dim', type=int, default=2)\n parser.add_argument('--output_dim', type=int, default=1)\n parser.add_argument('--batch_size', type=int, default=1)\n parser.add_argument('--epochs', type=int, default=5)\n parser.add_argument('--lr', type=int, default=0.1)\n parser.add_argument('--checkpoints_dir', type=str, default='../saves')\n args = parser.parse_args()\n input = np.array([[1, 1], [-1, -1], [0, 0.5], [0.1, 0.5], [0.2, 0.2], [\n 0.9, 0.5]])\n targets = np.array([1, -1, -1, -1, 1, 1])\n args.train_data = Dataset(input, targets)\n args.val_data = None\n args.mode = 'numpy'\n trainer = Trainer(args)\n for i, epoch in enumerate(range(1, args.epochs)):\n result = trainer.run_epoch()\n filename = os.path.join(trainer.save_dir, 'plot_%d.png' % (i + 1))\n plot_line(trainer.weights, filename)\n print('Epochs: [%d]/[%d]' % (epoch, args.epochs))\n error_count = result['error_count']\n if error_count == 0:\n print('No error')\n print(trainer.weights)\n break\n",
"step-4": "import os\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom tqdm import tqdm\n\nfrom models.networks import Perceptron\nfrom data.perceptron_dataset import Dataset, batchify\nfrom utils.utils import L1Loss, plot_line\nfrom modules.perceptron_trainer import Trainer\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--name', type=str, default='test')\n parser.add_argument('--input_dim', type=int, default=2)\n parser.add_argument('--output_dim', type=int, default=1)\n parser.add_argument('--batch_size', type=int, default=1)\n parser.add_argument('--epochs', type=int, default=5)\n parser.add_argument('--lr', type=int, default=0.1)\n parser.add_argument('--checkpoints_dir', type=str, default='../saves')\n args = parser.parse_args()\n\n input = np.array([[1, 1], [-1, -1], [0, 0.5], [0.1, 0.5], [0.2, 0.2], [0.9, 0.5]])\n targets = np.array([1, -1, -1, -1, 1, 1])\n args.train_data = Dataset(input, targets)\n args.val_data = None\n args.mode = 'numpy'\n trainer = Trainer(args)\n\n for i, epoch in enumerate(range(1, args.epochs)):\n result = trainer.run_epoch()\n filename = os.path.join(trainer.save_dir, 'plot_%d.png'%(i+1))\n plot_line(trainer.weights, filename)\n print(\"Epochs: [%d]/[%d]\"%(epoch, args.epochs))\n error_count = result['error_count']\n if error_count == 0:\n print('No error')\n print(trainer.weights)\n break\n \n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
class Solution(object):
def __init__(self):
self.n = None
self.memory = dict()
def dfs(self, bottom, energy):
        # memoized lookup: reuse the result already computed for (bottom, energy)
if (bottom,energy) in self.memory:
return self.memory[(bottom,energy)]
        # base case: one slot left, so the combination ends with `bottom`
if energy == 1:
return [[bottom]]
results = []
for v in range(bottom, self.n):
tail_list = self.dfs(v+1, energy-1)
for result in tail_list:
results.append([bottom]+result)
self.memory[(bottom,energy)] = results
return results
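
    # e.g. with self.n = 3, dfs(1, 2) returns [[1, 2], [1, 3]] and caches it
    # under the key (1, 2) for later calls.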
def memory_search(self, n, k):
self.n = n
self.memory = dict()
results = []
for i in range(1, n+1-k+1):
combinations = self.dfs(i, k)
if combinations is not None:
results = results + combinations
return results
def dp(self, n, k):
# initialize: F[n,1]
tmp = []
pre_k_results = {}
for i in range(1,n+1):
tmp.append([i])
pre_k_results[i] = tmp.copy()
results = pre_k_results
# F[n,k] = F[n-1,k] + (item + [n] for item in F[n-1, k-1])
for col in range(2,k+1):
cur_k_results = {}
for row in range(col,n-k+col+1):
cur_results = []
# Part1: F[n-1, k]
if row > col:
cur_results = cur_results + pre_n_results
# Part2: (item + [n] for item in F[n-1, k-1])
for item in pre_k_results[row-1]:
cur_results.append(item+[row])
pre_n_results = cur_results
cur_k_results[row] = cur_results
pre_k_results = cur_k_results
results = cur_k_results
return results[n]
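
    # Worked instance of the recurrence above for n=4, k=2:
    #   F[4,2] = F[3,2] + [item + [4] for item in F[3,1]]
    #          = [[1,2],[1,3],[2,3]] + [[1,4],[2,4],[3,4]]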
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
results = self.memory_search(n, k)
# results = self.dp(n, k)
return results
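
    # Example: Solution().combine(4, 2) yields all C(4,2) = 6 pairs, in
    # memoized-DFS order:
    #   [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]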
def main():
# n, k = 4, 1
# start = time.time()
# ret2 = Solution().dp(n, k)
# end = time.time()
# dp_time = round((end-start)*1000*1000,2)
# print(ret2, dp_time)
    ## timing comparison: memoized DFS vs. bottom-up DP
for n in range(5,10):
for k in range(2,n):
start = time.time()
ret1 = Solution().memory_search(n, k)
end = time.time()
memory_search_time = round((end-start)*1000*1000,2)
start = time.time()
ret2 = Solution().dp(n, k)
end = time.time()
dp_time = round((end-start)*1000*1000,2)
print("n={n},k={k} memory_search consume:{memory_search_time}ms, dp consume:{dp_time}ms".format(**locals()))
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "d52b6dda7111aefb7f9a7b10ad606cda615389d9",
"index": 7123,
"step-1": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n\n def dfs(self, bottom, energy):\n if (bottom, energy) in self.memory:\n return self.memory[bottom, energy]\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v + 1, energy - 1)\n for result in tail_list:\n results.append([bottom] + result)\n self.memory[bottom, energy] = results\n return results\n <mask token>\n <mask token>\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n return results\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n\n def __init__(self):\n self.n = None\n self.memory = dict()\n\n def dfs(self, bottom, energy):\n if (bottom, energy) in self.memory:\n return self.memory[bottom, energy]\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v + 1, energy - 1)\n for result in tail_list:\n results.append([bottom] + result)\n self.memory[bottom, energy] = results\n return results\n\n def memory_search(self, n, k):\n self.n = n\n self.memory = dict()\n results = []\n for i in range(1, n + 1 - k + 1):\n combinations = self.dfs(i, k)\n if combinations is not None:\n results = results + combinations\n return results\n <mask token>\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n return results\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def __init__(self):\n self.n = None\n self.memory = dict()\n\n def dfs(self, bottom, energy):\n if (bottom, energy) in self.memory:\n return self.memory[bottom, energy]\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v + 1, energy - 1)\n for result in tail_list:\n results.append([bottom] + result)\n self.memory[bottom, energy] = results\n return results\n\n def memory_search(self, n, k):\n self.n = n\n self.memory = dict()\n results = []\n for i in range(1, n + 1 - k + 1):\n combinations = self.dfs(i, k)\n if combinations is not None:\n results = results + combinations\n return results\n\n def dp(self, n, k):\n tmp = []\n pre_k_results = {}\n for i in range(1, n + 1):\n tmp.append([i])\n pre_k_results[i] = tmp.copy()\n results = pre_k_results\n for col in range(2, k + 1):\n cur_k_results = {}\n for row in range(col, n - k + col + 1):\n cur_results = []\n if row > col:\n cur_results = cur_results + pre_n_results\n for item in pre_k_results[row - 1]:\n cur_results.append(item + [row])\n pre_n_results = cur_results\n cur_k_results[row] = cur_results\n pre_k_results = cur_k_results\n results = cur_k_results\n return results[n]\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n return results\n\n\ndef main():\n for n in range(5, 10):\n for k in range(2, n):\n start = time.time()\n ret1 = Solution().memory_search(n, k)\n end = time.time()\n memory_search_time = round((end - start) * 1000 * 1000, 2)\n start = time.time()\n ret2 = Solution().dp(n, k)\n end = time.time()\n dp_time = round((end - start) * 1000 * 1000, 2)\n print(\n 'n={n},k={k} memory_search consume:{memory_search_time}ms, dp consume:{dp_time}ms'\n .format(**locals()))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution(object):\n\n def __init__(self):\n self.n = None\n self.memory = dict()\n\n def dfs(self, bottom, energy):\n if (bottom, energy) in self.memory:\n return self.memory[bottom, energy]\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v + 1, energy - 1)\n for result in tail_list:\n results.append([bottom] + result)\n self.memory[bottom, energy] = results\n return results\n\n def memory_search(self, n, k):\n self.n = n\n self.memory = dict()\n results = []\n for i in range(1, n + 1 - k + 1):\n combinations = self.dfs(i, k)\n if combinations is not None:\n results = results + combinations\n return results\n\n def dp(self, n, k):\n tmp = []\n pre_k_results = {}\n for i in range(1, n + 1):\n tmp.append([i])\n pre_k_results[i] = tmp.copy()\n results = pre_k_results\n for col in range(2, k + 1):\n cur_k_results = {}\n for row in range(col, n - k + col + 1):\n cur_results = []\n if row > col:\n cur_results = cur_results + pre_n_results\n for item in pre_k_results[row - 1]:\n cur_results.append(item + [row])\n pre_n_results = cur_results\n cur_k_results[row] = cur_results\n pre_k_results = cur_k_results\n results = cur_k_results\n return results[n]\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n return results\n\n\ndef main():\n for n in range(5, 10):\n for k in range(2, n):\n start = time.time()\n ret1 = Solution().memory_search(n, k)\n end = time.time()\n memory_search_time = round((end - start) * 1000 * 1000, 2)\n start = time.time()\n ret2 = Solution().dp(n, k)\n end = time.time()\n dp_time = round((end - start) * 1000 * 1000, 2)\n print(\n 'n={n},k={k} memory_search consume:{memory_search_time}ms, dp consume:{dp_time}ms'\n .format(**locals()))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import time\nclass Solution(object):\n def __init__(self):\n self.n = None\n self.memory = dict()\n \n def dfs(self, bottom, energy):\n # optimize for memory, save search time for duplicate results\n if (bottom,energy) in self.memory:\n return self.memory[(bottom,energy)]\n # ending search\n if energy == 1:\n return [[bottom]]\n results = []\n for v in range(bottom, self.n):\n tail_list = self.dfs(v+1, energy-1)\n for result in tail_list:\n results.append([bottom]+result)\n self.memory[(bottom,energy)] = results\n return results\n \n def memory_search(self, n, k):\n self.n = n \n self.memory = dict()\n results = []\n for i in range(1, n+1-k+1):\n combinations = self.dfs(i, k)\n if combinations is not None:\n results = results + combinations\n return results\n\n def dp(self, n, k):\n \n # initialize: F[n,1]\n tmp = []\n pre_k_results = {}\n for i in range(1,n+1):\n tmp.append([i])\n pre_k_results[i] = tmp.copy()\n \n results = pre_k_results\n # F[n,k] = F[n-1,k] + (item + [n] for item in F[n-1, k-1])\n for col in range(2,k+1):\n cur_k_results = {}\n for row in range(col,n-k+col+1):\n cur_results = []\n # Part1: F[n-1, k]\n if row > col:\n cur_results = cur_results + pre_n_results\n # Part2: (item + [n] for item in F[n-1, k-1])\n for item in pre_k_results[row-1]:\n cur_results.append(item+[row])\n pre_n_results = cur_results\n cur_k_results[row] = cur_results\n pre_k_results = cur_k_results\n results = cur_k_results\n \n return results[n]\n\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n results = self.memory_search(n, k)\n # results = self.dp(n, k)\n return results\n\ndef main():\n\n # n, k = 4, 1\n # start = time.time()\n # ret2 = Solution().dp(n, k)\n # end = time.time()\n # dp_time = round((end-start)*1000*1000,2) \n # print(ret2, dp_time)\n\n ## time consume test\n for n in range(5,10):\n for k in range(2,n):\n start = time.time()\n ret1 = Solution().memory_search(n, k)\n end = time.time()\n memory_search_time = round((end-start)*1000*1000,2)\n start = time.time()\n ret2 = Solution().dp(n, k)\n end = time.time()\n dp_time = round((end-start)*1000*1000,2)\n print(\"n={n},k={k} memory_search consume:{memory_search_time}ms, dp consume:{dp_time}ms\".format(**locals()))\n\n\nif __name__ == '__main__':\n main() ",
"step-ids": [
3,
5,
7,
8,
10
]
}
|
[
3,
5,
7,
8,
10
] |
# ----------------------
#
# *** WELCOME TO "HANGMAN" GAME ***
# Let's start programming
#
# ----------------------
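# Renders the gallows picture, the partially guessed word, the remaining
# alphabet and the list of wrong guesses for the current round.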
def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board, theme):
print(hangnam_pics[len(missedLetters)])
print("Тема:", theme)
    # Show the current state of the guessed word
    # (the redundant outer loop that rebuilt the same string once per letter was removed)
    dashed_word = ""
    for char in secretWord:
        if char in correctLetters:
            dashed_word = dashed_word + char + " "
        else:
            dashed_word += "_ "
    print("Слово на доске: ", dashed_word)
    # Show the remaining letters that are still available to guess
    for letter in alfabet:
        if letter in correctLetters or letter in missedLetters:
            alfabet_board += "_ "
        else:
            alfabet_board = alfabet_board + letter + " "
    print("Оставшиеся буквы: ", alfabet_board)
    # Show the list of wrong letters
print("Ошибочные буквы: ", end = "")
if missedLetters == "":
print(" -", end="")
else:
for letter in missedLetters:
print(letter + " ", end="")
print()
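# Picks a random theme, then a random word from that theme; the word is
# upper-cased so it can be compared against the alphabet list below.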
def getRandomWord(themes):
theme = random.choice(tuple(themes.keys()))
word = random.choice(themes[theme])
word = word.upper()
return theme, word
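# Prompts until the player enters exactly one new letter from the alphabet.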
def getGuess(correctLetters, missedLetters):
while True:
print()
guess = input("Введите букву --> ").upper()
if len(guess) != 1:
print("Пожалуйста, введите одну букву.")
elif guess in correctLetters or guess in missedLetters:
print("Вы уже называли эту букву")
elif guess in (" _") or guess not in alfabet or type(guess) != str:
print("Это не буква. Введите БУКВУ")
else:
break
print()
return guess
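# The game is won once every unique letter of the secret word has been
# guessed, and lost after the sixth miss (the last gallows picture).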
def gameFinish(correctLetters, missedLetters, secretWord):
unikLettersInSecretWord = set()
for i in secretWord:
unikLettersInSecretWord.add(i)
if len(correctLetters) == len(unikLettersInSecretWord):
print()
print()
print(f''' ПОЗДРАВЛЯЕМ!
Вы угадали слово {secretWord} и выиграли игру "ВИСЕЛИЦА"!''')
return True
elif len(missedLetters) == 6:
print()
print()
print(f''' ИГРА ОКОНЧЕНА!
    Вы не угадали слово {secretWord} и проиграли в игру "ВИСЕЛИЦА"!''')
return True
else:
return False
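# Asks whether to play another round; accepts only "да"/"нет" (yes/no).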
def oneMore():
while True:
print()
answer = input("Хотите сыграть еще раз? Введите да/нет --->").lower()
if answer == "да":
print()
print()
print()
print()
return True
elif answer == "нет":
return False
else:
print("Ваш ответ не понятен. Попробуем еще раз.")
def mainGame(themes):
missedLetters = ""
correctLetters = ""
alfabet_board = ""
print()
print(
''' Добро пожаловать в игру ВИСЕЛИЦА!
У Вас есть 6 попыток угадать слово по заданной теме.
После каждой неверной попытки к рисунку будет добавляться часть человечка.
Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!
Удачи!
''')
print()
input("Нажмите ENTER для старта.")
    # Pick the secret word and its theme
theme, secretWord = getRandomWord(themes)
while True:
        # Show the current state of the game
        displayBoard(missedLetters, correctLetters, secretWord, alfabet_board, theme)
        # Check whether the game is over; once a recursive replay returns, leave this loop too
        if gameFinish(correctLetters, missedLetters, secretWord):
            if oneMore():
                mainGame(themes)
            break
        # Ask the user for a letter and validate it
guess = getGuess(correctLetters, missedLetters)
        # Check the letter against the word and store it in the matching list
if guess in secretWord:
print("Такая буква есть в слове!")
correctLetters += guess
time.sleep(2)
else:
print("Такой буквы нет в слове!")
missedLetters += guess
time.sleep(2)
import random
import time
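# ASCII gallows stages: the picture index equals the current number of misses (0-6).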
hangnam_pics = [
'''
+---+
|
|
|
===''',
'''
+---+
O |
|
|
===''',
'''
+---+
O |
| |
|
===''',
'''
+---+
O |
|\ |
|
===''',
'''
+---+
O |
/|\ |
|
===''',
'''
+---+
O |
/|\ |
/ |
===''',
'''
+---+
O |
/|\ |
/ \ |
==='''
]
alfabet = ["А","Б","В","Г","Д","Е","Ë","Ж","З","И","Й","К","Л","М","Н","О","П","Р","С","Т","У","Ф", "Х","Ч","Ц","Ч","Ш","Щ","Ь","Ъ","Ы","Э","Ю","Я"]
goroda = ["Киев", "Одесса", "Харьков", "Львов", "Николаев", "Житомир", "Полтава", "Чернигов"]
zhyvotnye = ["аист","акула","бабуин","баран", "тритон", "черепаха", "ястреб", "ящерица", "муравей","барсук","медведь", "медоед", "муравьед", "панда", "ленивец"]
themes = {"Города Украины": goroda, "Животные": zhyvotnye}
mainGame(themes)
print()
print(" ВСЕГО ДОБРОГО!")
|
normal
|
{
"blob_id": "720ab0c0fcb40a50d73770e4ada6a78465e9ff96",
"index": 2755,
"step-1": "def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,\n theme):\n print(hangnam_pics[len(missedLetters)])\n print('Тема:', theme)\n for index in range(len(secretWord)):\n dashed_word = ''\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + ' '\n else:\n dashed_word += '_ '\n print('Слово на доске: ', dashed_word)\n for index in range(len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += '_ '\n else:\n alfabet_board = alfabet_board + alfabet[index] + ' '\n print('Оставшиеся буквы: ', alfabet_board)\n print('Ошибочные буквы: ', end='')\n if missedLetters == '':\n print(' -', end='')\n else:\n for letter in missedLetters:\n print(letter + ' ', end='')\n print()\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input('Введите букву --> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in correctLetters or guess in missedLetters:\n print('Вы уже называли эту букву')\n elif guess in ' _' or guess not in alfabet or type(guess) != str:\n print('Это не буква. Введите БУКВУ')\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(\n f\"\"\" ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(\n f\"\"\" ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-2": "def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,\n theme):\n print(hangnam_pics[len(missedLetters)])\n print('Тема:', theme)\n for index in range(len(secretWord)):\n dashed_word = ''\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + ' '\n else:\n dashed_word += '_ '\n print('Слово на доске: ', dashed_word)\n for index in range(len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += '_ '\n else:\n alfabet_board = alfabet_board + alfabet[index] + ' '\n print('Оставшиеся буквы: ', alfabet_board)\n print('Ошибочные буквы: ', end='')\n if missedLetters == '':\n print(' -', end='')\n else:\n for letter in missedLetters:\n print(letter + ' ', end='')\n print()\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input('Введите букву --> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in correctLetters or guess in missedLetters:\n print('Вы уже называли эту букву')\n elif guess in ' _' or guess not in alfabet or type(guess) != str:\n print('Это не буква. Введите БУКВУ')\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(\n f\"\"\" ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(\n f\"\"\" ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n else:\n return False\n\n\ndef oneMore():\n while True:\n print()\n answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()\n if answer == 'да':\n print()\n print()\n print()\n print()\n return True\n elif answer == 'нет':\n return False\n else:\n print('Ваш ответ не понятен. Попробуем еще раз.')\n\n\ndef mainGame(themes):\n missedLetters = ''\n correctLetters = ''\n alfabet_board = ''\n print()\n print(\n \"\"\" Добро пожаловать в игру ВИСЕЛИЦА!\n У Вас есть 6 попыток угадать слово по заданной теме.\n После каждой неверной попытки к рисунку будет добавляться часть человечка.\n Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!\n Удачи!\n \"\"\"\n )\n print()\n input('Нажмите ENTER для старта.')\n theme, secretWord = getRandomWord(themes)\n while True:\n displayBoard(missedLetters, correctLetters, secretWord,\n alfabet_board, theme)\n if gameFinish(correctLetters, missedLetters, secretWord):\n if oneMore():\n mainGame(themes)\n else:\n break\n guess = getGuess(correctLetters, missedLetters)\n if guess in secretWord:\n print('Такая буква есть в слове!')\n correctLetters += guess\n time.sleep(2)\n else:\n print('Такой буквы нет в слове!')\n missedLetters += guess\n time.sleep(2)\n\n\n<mask token>\n",
"step-3": "def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,\n theme):\n print(hangnam_pics[len(missedLetters)])\n print('Тема:', theme)\n for index in range(len(secretWord)):\n dashed_word = ''\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + ' '\n else:\n dashed_word += '_ '\n print('Слово на доске: ', dashed_word)\n for index in range(len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += '_ '\n else:\n alfabet_board = alfabet_board + alfabet[index] + ' '\n print('Оставшиеся буквы: ', alfabet_board)\n print('Ошибочные буквы: ', end='')\n if missedLetters == '':\n print(' -', end='')\n else:\n for letter in missedLetters:\n print(letter + ' ', end='')\n print()\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input('Введите букву --> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in correctLetters or guess in missedLetters:\n print('Вы уже называли эту букву')\n elif guess in ' _' or guess not in alfabet or type(guess) != str:\n print('Это не буква. Введите БУКВУ')\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(\n f\"\"\" ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(\n f\"\"\" ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n else:\n return False\n\n\ndef oneMore():\n while True:\n print()\n answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()\n if answer == 'да':\n print()\n print()\n print()\n print()\n return True\n elif answer == 'нет':\n return False\n else:\n print('Ваш ответ не понятен. 
Попробуем еще раз.')\n\n\ndef mainGame(themes):\n missedLetters = ''\n correctLetters = ''\n alfabet_board = ''\n print()\n print(\n \"\"\" Добро пожаловать в игру ВИСЕЛИЦА!\n У Вас есть 6 попыток угадать слово по заданной теме.\n После каждой неверной попытки к рисунку будет добавляться часть человечка.\n Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!\n Удачи!\n \"\"\"\n )\n print()\n input('Нажмите ENTER для старта.')\n theme, secretWord = getRandomWord(themes)\n while True:\n displayBoard(missedLetters, correctLetters, secretWord,\n alfabet_board, theme)\n if gameFinish(correctLetters, missedLetters, secretWord):\n if oneMore():\n mainGame(themes)\n else:\n break\n guess = getGuess(correctLetters, missedLetters)\n if guess in secretWord:\n print('Такая буква есть в слове!')\n correctLetters += guess\n time.sleep(2)\n else:\n print('Такой буквы нет в слове!')\n missedLetters += guess\n time.sleep(2)\n\n\n<mask token>\nhangnam_pics = [\"\"\"\n +---+\n |\n |\n |\n ===\"\"\",\n '\\n +---+\\n O |\\n |\\n |\\n ===',\n \"\"\"\n +---+\n O |\n | |\n |\n ===\"\"\",\n '\\n +---+\\n O |\\n |\\\\ |\\n |\\n ===',\n \"\"\"\n +---+\n O |\n /|\\\\ |\n |\n ===\"\"\",\n ' \\n +---+\\n O |\\n /|\\\\ |\\n / |\\n ===',\n \"\"\" \n +---+\n O |\n /|\\\\ |\n / \\\\ |\n ===\"\"\"]\nalfabet = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ë', 'Ж', 'З', 'И', 'Й', 'К', 'Л',\n 'М', 'Н', 'О', 'П', 'Р', 'С', 'Т', 'У', 'Ф', 'Х', 'Ч', 'Ц', 'Ч', 'Ш',\n 'Щ', 'Ь', 'Ъ', 'Ы', 'Э', 'Ю', 'Я']\ngoroda = ['Киев', 'Одесса', 'Харьков', 'Львов', 'Николаев', 'Житомир',\n 'Полтава', 'Чернигов']\nzhyvotnye = ['аист', 'акула', 'бабуин', 'баран', 'тритон', 'черепаха',\n 'ястреб', 'ящерица', 'муравей', 'барсук', 'медведь', 'медоед',\n 'муравьед', 'панда', 'ленивец']\nthemes = {'Города Украины': goroda, 'Животные': zhyvotnye}\nmainGame(themes)\nprint()\nprint(' ВСЕГО ДОБРОГО!')\n",
"step-4": "def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board,\n theme):\n print(hangnam_pics[len(missedLetters)])\n print('Тема:', theme)\n for index in range(len(secretWord)):\n dashed_word = ''\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + ' '\n else:\n dashed_word += '_ '\n print('Слово на доске: ', dashed_word)\n for index in range(len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += '_ '\n else:\n alfabet_board = alfabet_board + alfabet[index] + ' '\n print('Оставшиеся буквы: ', alfabet_board)\n print('Ошибочные буквы: ', end='')\n if missedLetters == '':\n print(' -', end='')\n else:\n for letter in missedLetters:\n print(letter + ' ', end='')\n print()\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input('Введите букву --> ').upper()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in correctLetters or guess in missedLetters:\n print('Вы уже называли эту букву')\n elif guess in ' _' or guess not in alfabet or type(guess) != str:\n print('Это не буква. Введите БУКВУ')\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(\n f\"\"\" ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(\n f\"\"\" ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!\"\"\"\n )\n return True\n else:\n return False\n\n\ndef oneMore():\n while True:\n print()\n answer = input('Хотите сыграть еще раз? Введите да/нет --->').lower()\n if answer == 'да':\n print()\n print()\n print()\n print()\n return True\n elif answer == 'нет':\n return False\n else:\n print('Ваш ответ не понятен. 
Попробуем еще раз.')\n\n\ndef mainGame(themes):\n missedLetters = ''\n correctLetters = ''\n alfabet_board = ''\n print()\n print(\n \"\"\" Добро пожаловать в игру ВИСЕЛИЦА!\n У Вас есть 6 попыток угадать слово по заданной теме.\n После каждой неверной попытки к рисунку будет добавляться часть человечка.\n Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!\n Удачи!\n \"\"\"\n )\n print()\n input('Нажмите ENTER для старта.')\n theme, secretWord = getRandomWord(themes)\n while True:\n displayBoard(missedLetters, correctLetters, secretWord,\n alfabet_board, theme)\n if gameFinish(correctLetters, missedLetters, secretWord):\n if oneMore():\n mainGame(themes)\n else:\n break\n guess = getGuess(correctLetters, missedLetters)\n if guess in secretWord:\n print('Такая буква есть в слове!')\n correctLetters += guess\n time.sleep(2)\n else:\n print('Такой буквы нет в слове!')\n missedLetters += guess\n time.sleep(2)\n\n\nimport random\nimport time\nhangnam_pics = [\"\"\"\n +---+\n |\n |\n |\n ===\"\"\",\n '\\n +---+\\n O |\\n |\\n |\\n ===',\n \"\"\"\n +---+\n O |\n | |\n |\n ===\"\"\",\n '\\n +---+\\n O |\\n |\\\\ |\\n |\\n ===',\n \"\"\"\n +---+\n O |\n /|\\\\ |\n |\n ===\"\"\",\n ' \\n +---+\\n O |\\n /|\\\\ |\\n / |\\n ===',\n \"\"\" \n +---+\n O |\n /|\\\\ |\n / \\\\ |\n ===\"\"\"]\nalfabet = ['А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ë', 'Ж', 'З', 'И', 'Й', 'К', 'Л',\n 'М', 'Н', 'О', 'П', 'Р', 'С', 'Т', 'У', 'Ф', 'Х', 'Ч', 'Ц', 'Ч', 'Ш',\n 'Щ', 'Ь', 'Ъ', 'Ы', 'Э', 'Ю', 'Я']\ngoroda = ['Киев', 'Одесса', 'Харьков', 'Львов', 'Николаев', 'Житомир',\n 'Полтава', 'Чернигов']\nzhyvotnye = ['аист', 'акула', 'бабуин', 'баран', 'тритон', 'черепаха',\n 'ястреб', 'ящерица', 'муравей', 'барсук', 'медведь', 'медоед',\n 'муравьед', 'панда', 'ленивец']\nthemes = {'Города Украины': goroda, 'Животные': zhyvotnye}\nmainGame(themes)\nprint()\nprint(' ВСЕГО ДОБРОГО!')\n",
"step-5": "\n# ----------------------\n#\n# *** WELCOME TO \"HANGMAN\" GAME ***\n# Let's start programming\n#\n# ----------------------\n\n\n\ndef displayBoard(missedLetters, correctLetters, secretWord, alfabet_board, theme):\n print(hangnam_pics[len(missedLetters)])\n print(\"Тема:\", theme)\n\n # Показываем состояние угадываемого слова на сейчас\n for index in range(len(secretWord)):\n dashed_word = \"\"\n for char in secretWord:\n if char in correctLetters:\n dashed_word = dashed_word + char + \" \"\n else:\n dashed_word += \"_ \"\n print(\"Слово на доске: \", dashed_word)\n\n\n # Показываем остальные буквы, доступные к угадыванию\n for index in range (len(alfabet)):\n if alfabet[index] in correctLetters or alfabet[index] in missedLetters:\n alfabet_board += \"_ \"\n else:\n alfabet_board = alfabet_board + alfabet[index] + \" \"\n print(\"Оставшиеся буквы: \", alfabet_board)\n\n\n #Показываем список ошибочных букв\n print(\"Ошибочные буквы: \", end = \"\")\n if missedLetters == \"\":\n print(\" -\", end=\"\")\n else:\n for letter in missedLetters:\n print(letter + \" \", end=\"\")\n print()\n\n\n\n\ndef getRandomWord(themes):\n theme = random.choice(tuple(themes.keys()))\n word = random.choice(themes[theme])\n word = word.upper()\n return theme, word\n\n\ndef getGuess(correctLetters, missedLetters):\n while True:\n print()\n guess = input(\"Введите букву --> \").upper()\n if len(guess) != 1:\n print(\"Пожалуйста, введите одну букву.\")\n elif guess in correctLetters or guess in missedLetters:\n print(\"Вы уже называли эту букву\")\n elif guess in (\" _\") or guess not in alfabet or type(guess) != str:\n print(\"Это не буква. Введите БУКВУ\")\n else:\n break\n print()\n return guess\n\n\ndef gameFinish(correctLetters, missedLetters, secretWord):\n unikLettersInSecretWord = set()\n for i in secretWord:\n unikLettersInSecretWord.add(i)\n\n if len(correctLetters) == len(unikLettersInSecretWord):\n print()\n print()\n print(f''' ПОЗДРАВЛЯЕМ! \n Вы угадали слово {secretWord} и выиграли игру \"ВИСЕЛИЦА\"!''')\n return True\n elif len(missedLetters) == 6:\n print()\n print()\n print(f''' ИГРА ОКОНЧЕНА! \n Вы не угадали слово {secretWord} и програли в игру \"ВИСЕЛИЦА\"!''')\n return True\n else:\n return False\n\ndef oneMore():\n while True:\n print()\n answer = input(\"Хотите сыграть еще раз? Введите да/нет --->\").lower()\n if answer == \"да\":\n print()\n print()\n print()\n print()\n return True\n elif answer == \"нет\":\n return False\n else:\n print(\"Ваш ответ не понятен. Попробуем еще раз.\")\n\n\n\n\n\n\ndef mainGame(themes):\n missedLetters = \"\"\n correctLetters = \"\"\n alfabet_board = \"\"\n\n print()\n print(\n ''' Добро пожаловать в игру ВИСЕЛИЦА!\n У Вас есть 6 попыток угадать слово по заданной теме.\n После каждой неверной попытки к рисунку будет добавляться часть человечка.\n Если слово будет угадано до того, как человечек станет виден полностью - Вы выиграли!\n Удачи!\n ''')\n print()\n input(\"Нажмите ENTER для старта.\")\n #Выбираем секретное слово\n theme, secretWord = getRandomWord(themes)\n\n\n while True:\n #Показываем текущее состояние игры\n displayBoard(missedLetters , correctLetters, secretWord, alfabet_board, theme)\n\n #Проверка результатов Игры - пишется последним\n if gameFinish(correctLetters, missedLetters, secretWord):\n if oneMore():\n mainGame(themes)\n else:\n break\n\n #Запрос пользователю на введение буквы. 
Проверка буквы.\n guess = getGuess(correctLetters, missedLetters)\n\n #Сверка буквы и запись в соответствующий массив\n if guess in secretWord:\n print(\"Такая буква есть в слове!\")\n correctLetters += guess\n time.sleep(2)\n else:\n print(\"Такой буквы нет в слове!\")\n missedLetters += guess\n time.sleep(2)\n\n\n\nimport random\nimport time\n\nhangnam_pics = [\n '''\n +---+\n |\n |\n |\n ===''',\n '''\n +---+\n O |\n |\n |\n ===''',\n '''\n +---+\n O |\n | |\n |\n ===''',\n '''\n +---+\n O |\n |\\ |\n |\n ===''',\n '''\n +---+\n O |\n /|\\ |\n |\n ===''',\n ''' \n +---+\n O |\n /|\\ |\n / |\n ===''',\n ''' \n +---+\n O |\n /|\\ |\n / \\ |\n ==='''\n ]\nalfabet = [\"А\",\"Б\",\"В\",\"Г\",\"Д\",\"Е\",\"Ë\",\"Ж\",\"З\",\"И\",\"Й\",\"К\",\"Л\",\"М\",\"Н\",\"О\",\"П\",\"Р\",\"С\",\"Т\",\"У\",\"Ф\", \"Х\",\"Ч\",\"Ц\",\"Ч\",\"Ш\",\"Щ\",\"Ь\",\"Ъ\",\"Ы\",\"Э\",\"Ю\",\"Я\"]\ngoroda = [\"Киев\", \"Одесса\", \"Харьков\", \"Львов\", \"Николаев\", \"Житомир\", \"Полтава\", \"Чернигов\"]\nzhyvotnye = [\"аист\",\"акула\",\"бабуин\",\"баран\", \"тритон\", \"черепаха\", \"ястреб\", \"ящерица\", \"муравей\",\"барсук\",\"медведь\", \"медоед\", \"муравьед\", \"панда\", \"ленивец\"]\nthemes = {\"Города Украины\": goroda, \"Животные\": zhyvotnye}\n\nmainGame(themes)\nprint()\nprint(\" ВСЕГО ДОБРОГО!\")\n",
"step-ids": [
4,
6,
8,
9,
10
]
}
|
[
4,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
class Table(DashComponent):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Table(DashComponent):
def __init__(self, plot_factory, df, title='Table'):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div
(id='output-data-upload')], type='dot')])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Table(DashComponent):
def __init__(self, plot_factory, df, title='Table'):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div
(id='output-data-upload')], type='dot')])
def component_callbacks(self, app):
"""
Automatically does the callbacks of the interactive parts of the table component.
:param app: Dash app that uses the code.
:return: Output of the callback functions.
"""
@app.callback(Output('main_table', 'selected_rows' + self.title),
Input('Mygraph-normal-plot', 'selectedData'))
def display_selected_data(graphPoints):
"""
            Display the selected data in the table.
:param graphPoints: Data that is currently displayed
:return: Table
"""
points_selected = []
if graphPoints is not None:
print(graphPoints)
for point in graphPoints['points']:
points_selected.append(point['customdata'][0])
return points_selected
def set_data(self, df):
"""
        Replaces the dataframe that backs the table component.
        :param df: New dataframe with all data.
"""
self.df = df
<|reserved_special_token_1|>
import dash_table
import pandas as pd
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from dash_oop_components import DashComponent
import dash_table
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp
from src.main.python.oop.Dataframe import Dataframe
from src.main.python.oop.Figure_factories import VisualFactories
class Table(DashComponent):
def __init__(self, plot_factory, df, title='Table'):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div
(id='output-data-upload')], type='dot')])
def component_callbacks(self, app):
"""
Automatically does the callbacks of the interactive parts of the table component.
:param app: Dash app that uses the code.
:return: Output of the callback functions.
"""
@app.callback(Output('main_table', 'selected_rows' + self.title),
Input('Mygraph-normal-plot', 'selectedData'))
def display_selected_data(graphPoints):
"""
            Display the selected data in the table.
:param graphPoints: Data that is currently displayed
:return: Table
"""
points_selected = []
if graphPoints is not None:
print(graphPoints)
for point in graphPoints['points']:
points_selected.append(point['customdata'][0])
return points_selected
def set_data(self, df):
"""
        Replaces the dataframe that backs the table component.
        :param df: New dataframe with all data.
"""
self.df = df
<|reserved_special_token_1|>
import dash_table
import pandas as pd
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from dash_oop_components import DashComponent
import dash_table
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp
from src.main.python.oop.Dataframe import Dataframe
from src.main.python.oop.Figure_factories import VisualFactories
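# Table component: a selection made on the 'Mygraph-normal-plot' figure drives
# the selected rows of the table through the callback registered below.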
class Table(DashComponent):
def __init__(self, plot_factory, df, title="Table"):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([
dcc.Loading(
id="loading-icon3",
children=[html.Div(id='output-data-upload')],
type="dot",
)
])
def component_callbacks(self, app):
"""
Automatically does the callbacks of the interactive parts of the table component.
:param app: Dash app that uses the code.
:return: Output of the callback functions.
"""
@app.callback(
Output('main_table', 'selected_rows' + self.title),
Input('Mygraph-normal-plot', 'selectedData'))
def display_selected_data(graphPoints):
"""
            Display the selected data in the table.
:param graphPoints: Data that is currently displayed
:return: Table
"""
points_selected = []
if graphPoints is not None:
print(graphPoints)
for point in graphPoints['points']:
points_selected.append(point['customdata'][0])
return points_selected
def set_data(self, df):
"""
        Replaces the dataframe that backs the table component.
        :param df: New dataframe with all data.
"""
self.df = df
|
flexible
|
{
"blob_id": "485f85ec5e3f38148978453ea5e7f9a54eb310e1",
"index": 160,
"step-1": "<mask token>\n\n\nclass Table(DashComponent):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Table(DashComponent):\n\n def __init__(self, plot_factory, df, title='Table'):\n \"\"\"\n Displays table at the bottom of the page.\n :param plot_factory: Factory with all plot functions\n :param df: Dataframe with all data\n :param title: Title of the page\n \"\"\"\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df\n\n def layout(self, params=None):\n \"\"\"\n Shows the html layout of the table component.\n :param params: Parameters selected at the current level of the dashboard.\n :return: Html layout of the program.\n \"\"\"\n return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div\n (id='output-data-upload')], type='dot')])\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Table(DashComponent):\n\n def __init__(self, plot_factory, df, title='Table'):\n \"\"\"\n Displays table at the bottom of the page.\n :param plot_factory: Factory with all plot functions\n :param df: Dataframe with all data\n :param title: Title of the page\n \"\"\"\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df\n\n def layout(self, params=None):\n \"\"\"\n Shows the html layout of the table component.\n :param params: Parameters selected at the current level of the dashboard.\n :return: Html layout of the program.\n \"\"\"\n return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div\n (id='output-data-upload')], type='dot')])\n\n def component_callbacks(self, app):\n \"\"\"\n Automatically does the callbacks of the interactive parts of the table component.\n :param app: Dash app that uses the code.\n :return: Output of the callback functions.\n \"\"\"\n\n @app.callback(Output('main_table', 'selected_rows' + self.title),\n Input('Mygraph-normal-plot', 'selectedData'))\n def display_selected_data(graphPoints):\n \"\"\"\n Display the selected data i the table.\n :param graphPoints: Data that is currently displayed\n :return: Table\n \"\"\"\n points_selected = []\n if graphPoints is not None:\n print(graphPoints)\n for point in graphPoints['points']:\n points_selected.append(point['customdata'][0])\n return points_selected\n\n def set_data(self, df):\n \"\"\"\n Loads in possible parameters for the x and y-axis in dropdown from the data.\n :param dummy: dummy html property\n :return: Possible options for dropdown x-axis.\n \"\"\"\n self.df = df\n",
"step-4": "import dash_table\nimport pandas as pd\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nfrom dash_oop_components import DashComponent\nimport dash_table\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nfrom dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp\nfrom src.main.python.oop.Dataframe import Dataframe\nfrom src.main.python.oop.Figure_factories import VisualFactories\n\n\nclass Table(DashComponent):\n\n def __init__(self, plot_factory, df, title='Table'):\n \"\"\"\n Displays table at the bottom of the page.\n :param plot_factory: Factory with all plot functions\n :param df: Dataframe with all data\n :param title: Title of the page\n \"\"\"\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df\n\n def layout(self, params=None):\n \"\"\"\n Shows the html layout of the table component.\n :param params: Parameters selected at the current level of the dashboard.\n :return: Html layout of the program.\n \"\"\"\n return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div\n (id='output-data-upload')], type='dot')])\n\n def component_callbacks(self, app):\n \"\"\"\n Automatically does the callbacks of the interactive parts of the table component.\n :param app: Dash app that uses the code.\n :return: Output of the callback functions.\n \"\"\"\n\n @app.callback(Output('main_table', 'selected_rows' + self.title),\n Input('Mygraph-normal-plot', 'selectedData'))\n def display_selected_data(graphPoints):\n \"\"\"\n Display the selected data i the table.\n :param graphPoints: Data that is currently displayed\n :return: Table\n \"\"\"\n points_selected = []\n if graphPoints is not None:\n print(graphPoints)\n for point in graphPoints['points']:\n points_selected.append(point['customdata'][0])\n return points_selected\n\n def set_data(self, df):\n \"\"\"\n Loads in possible parameters for the x and y-axis in dropdown from the data.\n :param dummy: dummy html property\n :return: Possible options for dropdown x-axis.\n \"\"\"\n self.df = df\n",
"step-5": "import dash_table\nimport pandas as pd\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nfrom dash_oop_components import DashComponent\nimport dash_table\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nfrom dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp\n\n\nfrom src.main.python.oop.Dataframe import Dataframe\nfrom src.main.python.oop.Figure_factories import VisualFactories\n\nclass Table(DashComponent):\n def __init__(self, plot_factory, df, title=\"Table\"):\n \"\"\"\n Displays table at the bottom of the page.\n :param plot_factory: Factory with all plot functions\n :param df: Dataframe with all data\n :param title: Title of the page\n \"\"\"\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df\n\n def layout(self, params=None):\n \"\"\"\n Shows the html layout of the table component.\n :param params: Parameters selected at the current level of the dashboard.\n :return: Html layout of the program.\n \"\"\"\n return html.Div([\n dcc.Loading(\n id=\"loading-icon3\",\n children=[html.Div(id='output-data-upload')],\n type=\"dot\",\n )\n ])\n\n def component_callbacks(self, app):\n \"\"\"\n Automatically does the callbacks of the interactive parts of the table component.\n :param app: Dash app that uses the code.\n :return: Output of the callback functions.\n \"\"\"\n @app.callback(\n Output('main_table', 'selected_rows' + self.title),\n Input('Mygraph-normal-plot', 'selectedData'))\n def display_selected_data(graphPoints):\n \"\"\"\n Display the selected data i the table.\n :param graphPoints: Data that is currently displayed\n :return: Table\n \"\"\"\n points_selected = []\n if graphPoints is not None:\n print(graphPoints)\n for point in graphPoints['points']:\n points_selected.append(point['customdata'][0])\n return points_selected\n\n def set_data(self, df):\n \"\"\"\n Loads in possible parameters for the x and y-axis in dropdown from the data.\n :param dummy: dummy html property\n :return: Possible options for dropdown x-axis.\n \"\"\"\n self.df = df",
"step-ids": [
1,
3,
5,
6,
7
]
}
|
[
1,
3,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for var in var_list:
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
<|reserved_special_token_0|>
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
<|reserved_special_token_0|>
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
<|reserved_special_token_0|>
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data_root = '../data/synthetic/standard/'
var_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']
eke = 0.01
output = []
diagnostic_functions = [basic_stats]
for var in var_list:
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
var = 'hs'
diagnostic_functions = [hs_spectral_slope]
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
var = 'cur'
diagnostic_functions = [flow_stats]
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
ds = xr.merge(output)
df = ds.to_dataframe()
df = df.reset_index()
data = df.to_xarray()
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')
<|reserved_special_token_1|>
import glob
import xarray as xr
from model_diagnostics import *
data_root = '../data/synthetic/standard/'
var_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']
eke = 0.01
output = []
diagnostic_functions = [basic_stats]
for var in var_list:
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
var = 'hs'
diagnostic_functions = [hs_spectral_slope]
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
var = 'cur'
diagnostic_functions = [flow_stats]
grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print('processing %s' % os.path.basename(f))
ds = xr.merge(output)
df = ds.to_dataframe()
df = df.reset_index()
data = df.to_xarray()
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')
<|reserved_special_token_1|>
import glob
import xarray as xr
from model_diagnostics import *
data_root = '../data/synthetic/standard/'
var_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']
eke = 0.01
##########################
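# Collect diagnostics per gridded file: basic statistics for every wave
# variable, then the spectral slope for hs and flow statistics for the currents.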
output = []
diagnostic_functions = [basic_stats]
for var in var_list:
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var,diagnostic_functions))
print("processing %s" %os.path.basename(f))
var = 'hs'
diagnostic_functions = [hs_spectral_slope]
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var,diagnostic_functions))
print("processing %s" %os.path.basename(f))
var = 'cur'
diagnostic_functions = [flow_stats]
grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))
for f in grid_files:
output.append(analize_member(f, var, diagnostic_functions))
print("processing %s" %os.path.basename(f))
ds = xr.merge(output)
df = ds.to_dataframe()
df = df.reset_index()
data = df.to_xarray()
data.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc'%eke, mode='w')
|
flexible
|
{
"blob_id": "6b727cdfc684db4ba919cd5390fe45de43a806fe",
"index": 309,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<mask token>\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<mask token>\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\n<mask token>\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n",
"step-3": "<mask token>\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n",
"step-4": "import glob\nimport xarray as xr\nfrom model_diagnostics import *\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\n for f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root + 'gridded/*%s*%s.nc' % (eke, var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print('processing %s' % os.path.basename(f))\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc' % eke, mode='w')\n",
"step-5": "import glob\nimport xarray as xr\n\nfrom model_diagnostics import *\n\ndata_root = '../data/synthetic/standard/'\nvar_list = ['hs', 'dp', 'spr', 'fp', 'dir', 't0m1']\neke = 0.01\n##########################\noutput = []\ndiagnostic_functions = [basic_stats]\nfor var in var_list:\n grid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\n for f in grid_files:\n output.append(analize_member(f, var,diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\nvar = 'hs'\ndiagnostic_functions = [hs_spectral_slope]\ngrid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\nfor f in grid_files:\n output.append(analize_member(f, var,diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\nvar = 'cur'\ndiagnostic_functions = [flow_stats]\ngrid_files = glob.glob(data_root+'gridded/*%s*%s.nc' %(eke,var))\nfor f in grid_files:\n output.append(analize_member(f, var, diagnostic_functions))\n print(\"processing %s\" %os.path.basename(f))\n\nds = xr.merge(output)\ndf = ds.to_dataframe()\ndf = df.reset_index()\ndata = df.to_xarray()\ndata.to_netcdf(path='../data/model_stats/S%s_gridded_stats.nc'%eke, mode='w')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# coding:utf-8
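# Scrapes the YPHS online contact book and forwards new posts to a LINE or
# Telegram chat, using the log file to avoid sending the same post twice.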
import requests
from bs4 import BeautifulSoup
import re
from datetime import datetime
# Path of the log file (an absolute path is recommended)
log_path='./log.txt'
# Personal data used to log in to the contact book
sid=''#student number (Ex. 10731187)
cid=''#national ID number (Ex. A123456789)
bir=''#birthday (Ex. 2000/1/1)
#line or telegram module
#platform='telegram'
platform='line'
if platform=='line':
from linebot import LineBotApi
from linebot.models import TextSendMessage
#line api token
bottoken=''
#line chat id
chatid=''
line_bot_api = LineBotApi(bottoken)
if platform=='telegram':
#telegram bot token
bottoken=''
#telegram group chat id
chatid=''
# Weekly class timetable (Mon-Fri; the two empty lists are Sat/Sun)
cls=[['學校活動','英文','化學','國文','地理','生物','公民','歷史','數學'],
['彈性課程','地科','數學','數學','資訊','西洋影視','國文','國文','英文'],
['數學','物理','生活科技','體育','國文','化學','音樂','英文','英文'],
['數學','論孟選讀','生物','多元選修','歷史','化學','英文','國防','物理'],
['彈性課程','英文','數學','地理','公民','國文','體育','物理','社團'],[],[]]
def open_log():
global log
global fw
try:
fr = open(log_path, "r")
log=fr.read().split('\n')
fr.close()
except:
fw = open(log_path, "w+")
log=''
return
fw = open(log_path, "a")
return
def login_homework():
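    # ASP.NET WebForms login: fetch the page once to read the hidden
    # __VIEWSTATE fields, then echo them back together with the credentials.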
res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')
soup = BeautifulSoup(res.text, "lxml")
VIEWSTATE=soup.find(id="__VIEWSTATE")
VIEWSTATEGENERATOR=soup.find(id="__VIEWSTATEGENERATOR")
EVENTVALIDATION=soup.find(id="__EVENTVALIDATION")
res=requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx', allow_redirects=False, data = {'__VIEWSTATE':VIEWSTATE.get('value'),'__VIEWSTATEGENERATOR':VIEWSTATEGENERATOR.get('value'),'__EVENTVALIDATION':EVENTVALIDATION.get('value'),'chk_id':'學生/家長','tbx_sno':sid,'tbx_sid':cid,'tbx_sbir':bir,'but_login_stud':'登 入'})
global cook
cook=res.cookies['ASP.NET_SessionId']
return
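# Clicks through the "詳細內容" (detail) postbacks of the first results page
# (GridViewS$ctl15 down to ctl02) and forwards every post not yet in the log.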
def crawl_and_fetch_today_homework(tomorrow_calendar,tomorrow_class_table):
send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',cookies={'ASP.NET_SessionId':cook})
soup = BeautifulSoup(send.text, "lxml")
VIEWSTATE=soup.find(id="__VIEWSTATE")
VIEWSTATEGENERATOR=soup.find(id="__VIEWSTATEGENERATOR")
EVENTVALIDATION=soup.find(id="__EVENTVALIDATION")
    for x in range(15,1,-1):#posts 1~15 on the first page
        try:#use try in case a page has fewer than 15 posts
            #convert the index to a zero-padded string
num=str('')
if(x<10):
num='0'+str(x)
else:
num=str(x)
            #fetch the post content
send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',cookies={'ASP.NET_SessionId':cook}, data = {'__VIEWSTATE':VIEWSTATE.get('value'),'__VIEWSTATEGENERATOR':VIEWSTATEGENERATOR.get('value'),'__EVENTVALIDATION':EVENTVALIDATION.get('value'),('GridViewS$ctl'+num+'$but_vf1'):'詳細內容'})
soup = BeautifulSoup(send.text, "lxml")
            #check whether this post has already been sent
ok=bool(True)
for y in range(0,len(log),1):
if soup.find(id='Lab_purport').text==log[y]:
ok=bool(False)
            if ok==True:#not sent yet
fw.write(soup.find(id='Lab_purport').text+'\n')
post_title=str('[主旨:'+str(soup.find(id='Lab_purport').text)+']')
post_content=str(soup.find(id='Lab_content').text)
post_attachment=str(' ')
if(soup.find(target='_blank')):
post_attachment=soup.find(target='_blank').get('href')
send_word=post_title+'\n'+post_content+'\n'+post_attachment
if(str(soup.find(id='Lab_purport').text).find('聯絡簿')>=0 and datetime.today().weekday()<4):
send_word=send_word+'\n***系統訊息***\n'+tomorrow_calendar+'\n'+tomorrow_class_table
if(str(soup.find(id='Lab_purport').text).find('聯絡簿')>=0 and datetime.today().weekday() == 4 ):
send_word=send_word
post(send_word)
except:
pass
return
def crawl_tomorrow_calendar():
res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')
soup = BeautifulSoup(res.text, "lxml")
calendar='明日行事曆:\n 全校:'+soup.find_all(color="#404040")[16].text
if(soup.find_all(color="#404040")[16].text==' '):
calendar+='N/A'
calendar=calendar+'\n 高一:'+soup.find_all(color="#404040")[21].text
if(soup.find_all(color="#404040")[21].text==' '):
calendar+='N/A'
return calendar
def fetch_tomorrow_class_table():
count=int(0)
tomorrow_class='\n明日課表:\n 早上:\n '
for i in cls[(datetime.today().weekday()+1)%7]:
if(count==4):
tomorrow_class+='\n 下午:\n '
tomorrow_class+='['+i+']'
if(count<8 and count!=3):
tomorrow_class+='->'
count+=1
return tomorrow_class
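# Sends the assembled message through the configured platform:
# a LINE push message or the Telegram bot sendMessage endpoint.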
def post(send_word):
if platform=='line':
line_bot_api.push_message(chatid,TextSendMessage(text=send_word,wrap=True))
if platform=='telegram':
requests.get("https://api.telegram.org/bot"+bottoken+"/sendMessage?chat_id="+chatid+"&text="+send_word)
'''
!!!contact [email protected] for this function!!!
def crawl_message_board():
res = requests.get('http://59.120.227.144:11300/line/api.php')
soup = BeautifulSoup(res.text, "lxml")
message_board = soup.find_all('td')
message='\n\n留言板( http://59.120.227.144:11300/line/ ) : \n'
for i in range(0,len(message_board),3):
message=message+'第'+str(int((i/3)+1))+'則:\n-'+message_board[i+1].text+"\n--來自:"+message_board[i+2].text+'\n'
return message
'''
def close_log():
fw.close()
def main():
open_log()
login_homework()
crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),fetch_tomorrow_class_table())
close_log()
    #on Sunday evening, remind that school resumes tomorrow
if(datetime.today().weekday()==6 and datetime.today().hour == 21 and datetime.today().minute<10):
send_word='[主旨:機器人訊息]\n***系統訊息***\n'+crawl_tomorrow_calendar()+'\n'+fetch_tomorrow_class_table()
post(send_word)
main()
|
normal
|
{
"blob_id": "77f37a80d160e42bb74017a55aa9d06b4c8d4fee",
"index": 4320,
"step-1": "<mask token>\n\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',\n allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),\n '__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),\n '__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':\n '學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,\n 'but_login_stud': '登\\u3000\\u3000入'})\n global cook\n cook = res.cookies['ASP.NET_SessionId']\n return\n\n\n<mask token>\n\n\ndef crawl_tomorrow_calendar():\n res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n calendar = '明日行事曆:\\n 全校:' + soup.find_all(color='#404040')[16].text\n if soup.find_all(color='#404040')[16].text == '\\xa0':\n calendar += 'N/A'\n calendar = calendar + '\\n 高一:' + soup.find_all(color='#404040')[21].text\n if soup.find_all(color='#404040')[21].text == '\\xa0':\n calendar += 'N/A'\n return calendar\n\n\ndef fetch_tomorrow_class_table():\n count = int(0)\n tomorrow_class = '\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday() + 1) % 7]:\n if count == 4:\n tomorrow_class += '\\n 下午:\\n '\n tomorrow_class += '[' + i + ']'\n if count < 8 and count != 3:\n tomorrow_class += '->'\n count += 1\n return tomorrow_class\n\n\ndef post(send_word):\n if platform == 'line':\n line_bot_api.push_message(chatid, TextSendMessage(text=send_word,\n wrap=True))\n if platform == 'telegram':\n requests.get('https://api.telegram.org/bot' + bottoken +\n '/sendMessage?chat_id=' + chatid + '&text=' + send_word)\n\n\n<mask token>\n\n\ndef close_log():\n fw.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef open_log():\n global log\n global fw\n try:\n fr = open(log_path, 'r')\n log = fr.read().split('\\n')\n fr.close()\n except:\n fw = open(log_path, 'w+')\n log = ''\n return\n fw = open(log_path, 'a')\n return\n\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',\n allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),\n '__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),\n '__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':\n '學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,\n 'but_login_stud': '登\\u3000\\u3000入'})\n global cook\n cook = res.cookies['ASP.NET_SessionId']\n return\n\n\ndef crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):\n send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies\n ={'ASP.NET_SessionId': cook})\n soup = BeautifulSoup(send.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n for x in range(15, 1, -1):\n try:\n num = str('')\n if x < 10:\n num = '0' + str(x)\n else:\n num = str(x)\n send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',\n cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':\n VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':\n VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':\n EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +\n '$but_vf1'): '詳細內容'})\n soup = BeautifulSoup(send.text, 'lxml')\n ok = bool(True)\n for y in range(0, len(log), 1):\n if soup.find(id='Lab_purport').text == log[y]:\n ok = bool(False)\n if ok == True:\n fw.write(soup.find(id='Lab_purport').text + '\\n')\n post_title = str('[主旨:' + str(soup.find(id='Lab_purport').\n text) + ']')\n post_content = str(soup.find(id='Lab_content').text)\n post_attachment = str(' ')\n if soup.find(target='_blank'):\n post_attachment = soup.find(target='_blank').get('href')\n send_word = (post_title + '\\n' + post_content + '\\n' +\n post_attachment)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() < 4:\n send_word = (send_word + '\\n***系統訊息***\\n' +\n tomorrow_calendar + '\\n' + tomorrow_class_table)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() == 4:\n send_word = send_word\n post(send_word)\n except:\n pass\n return\n\n\ndef crawl_tomorrow_calendar():\n res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n calendar = '明日行事曆:\\n 全校:' + soup.find_all(color='#404040')[16].text\n if soup.find_all(color='#404040')[16].text == '\\xa0':\n calendar += 'N/A'\n calendar = calendar + '\\n 高一:' + soup.find_all(color='#404040')[21].text\n if soup.find_all(color='#404040')[21].text == '\\xa0':\n calendar += 'N/A'\n return calendar\n\n\ndef fetch_tomorrow_class_table():\n count = int(0)\n tomorrow_class = '\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday() + 1) % 7]:\n if count == 4:\n tomorrow_class += '\\n 下午:\\n '\n tomorrow_class += '[' + i + ']'\n if count < 8 and count != 3:\n tomorrow_class += '->'\n count += 1\n return tomorrow_class\n\n\ndef post(send_word):\n if platform == 'line':\n 
line_bot_api.push_message(chatid, TextSendMessage(text=send_word,\n wrap=True))\n if platform == 'telegram':\n requests.get('https://api.telegram.org/bot' + bottoken +\n '/sendMessage?chat_id=' + chatid + '&text=' + send_word)\n\n\n<mask token>\n\n\ndef close_log():\n fw.close()\n\n\ndef main():\n open_log()\n login_homework()\n crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),\n fetch_tomorrow_class_table())\n close_log()\n if datetime.today().weekday() == 6 and datetime.today(\n ).hour == 21 and datetime.today().minute < 10:\n send_word = '[主旨:機器人訊息]\\n***系統訊息***\\n' + crawl_tomorrow_calendar(\n ) + '\\n' + fetch_tomorrow_class_table()\n post(send_word)\n\n\n<mask token>\n",
"step-3": "<mask token>\nlog_path = './log.txt'\nsid = ''\ncid = ''\nbir = ''\nplatform = 'line'\nif platform == 'line':\n from linebot import LineBotApi\n from linebot.models import TextSendMessage\n bottoken = ''\n chatid = ''\n line_bot_api = LineBotApi(bottoken)\nif platform == 'telegram':\n bottoken = ''\n chatid = ''\ncls = [['學校活動', '英文', '化學', '國文', '地理', '生物', '公民', '歷史', '數學'], ['彈性課程',\n '地科', '數學', '數學', '資訊', '西洋影視', '國文', '國文', '英文'], ['數學', '物理', '生活科技',\n '體育', '國文', '化學', '音樂', '英文', '英文'], ['數學', '論孟選讀', '生物', '多元選修', '歷史',\n '化學', '英文', '國防', '物理'], ['彈性課程', '英文', '數學', '地理', '公民', '國文', '體育',\n '物理', '社團'], [], []]\n\n\ndef open_log():\n global log\n global fw\n try:\n fr = open(log_path, 'r')\n log = fr.read().split('\\n')\n fr.close()\n except:\n fw = open(log_path, 'w+')\n log = ''\n return\n fw = open(log_path, 'a')\n return\n\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',\n allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),\n '__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),\n '__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':\n '學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,\n 'but_login_stud': '登\\u3000\\u3000入'})\n global cook\n cook = res.cookies['ASP.NET_SessionId']\n return\n\n\ndef crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):\n send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies\n ={'ASP.NET_SessionId': cook})\n soup = BeautifulSoup(send.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n for x in range(15, 1, -1):\n try:\n num = str('')\n if x < 10:\n num = '0' + str(x)\n else:\n num = str(x)\n send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',\n cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':\n VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':\n VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':\n EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +\n '$but_vf1'): '詳細內容'})\n soup = BeautifulSoup(send.text, 'lxml')\n ok = bool(True)\n for y in range(0, len(log), 1):\n if soup.find(id='Lab_purport').text == log[y]:\n ok = bool(False)\n if ok == True:\n fw.write(soup.find(id='Lab_purport').text + '\\n')\n post_title = str('[主旨:' + str(soup.find(id='Lab_purport').\n text) + ']')\n post_content = str(soup.find(id='Lab_content').text)\n post_attachment = str(' ')\n if soup.find(target='_blank'):\n post_attachment = soup.find(target='_blank').get('href')\n send_word = (post_title + '\\n' + post_content + '\\n' +\n post_attachment)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() < 4:\n send_word = (send_word + '\\n***系統訊息***\\n' +\n tomorrow_calendar + '\\n' + tomorrow_class_table)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() == 4:\n send_word = send_word\n post(send_word)\n except:\n pass\n return\n\n\ndef crawl_tomorrow_calendar():\n res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n calendar = '明日行事曆:\\n 全校:' + soup.find_all(color='#404040')[16].text\n if 
soup.find_all(color='#404040')[16].text == '\\xa0':\n calendar += 'N/A'\n calendar = calendar + '\\n 高一:' + soup.find_all(color='#404040')[21].text\n if soup.find_all(color='#404040')[21].text == '\\xa0':\n calendar += 'N/A'\n return calendar\n\n\ndef fetch_tomorrow_class_table():\n count = int(0)\n tomorrow_class = '\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday() + 1) % 7]:\n if count == 4:\n tomorrow_class += '\\n 下午:\\n '\n tomorrow_class += '[' + i + ']'\n if count < 8 and count != 3:\n tomorrow_class += '->'\n count += 1\n return tomorrow_class\n\n\ndef post(send_word):\n if platform == 'line':\n line_bot_api.push_message(chatid, TextSendMessage(text=send_word,\n wrap=True))\n if platform == 'telegram':\n requests.get('https://api.telegram.org/bot' + bottoken +\n '/sendMessage?chat_id=' + chatid + '&text=' + send_word)\n\n\n<mask token>\n\n\ndef close_log():\n fw.close()\n\n\ndef main():\n open_log()\n login_homework()\n crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),\n fetch_tomorrow_class_table())\n close_log()\n if datetime.today().weekday() == 6 and datetime.today(\n ).hour == 21 and datetime.today().minute < 10:\n send_word = '[主旨:機器人訊息]\\n***系統訊息***\\n' + crawl_tomorrow_calendar(\n ) + '\\n' + fetch_tomorrow_class_table()\n post(send_word)\n\n\nmain()\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import datetime\nlog_path = './log.txt'\nsid = ''\ncid = ''\nbir = ''\nplatform = 'line'\nif platform == 'line':\n from linebot import LineBotApi\n from linebot.models import TextSendMessage\n bottoken = ''\n chatid = ''\n line_bot_api = LineBotApi(bottoken)\nif platform == 'telegram':\n bottoken = ''\n chatid = ''\ncls = [['學校活動', '英文', '化學', '國文', '地理', '生物', '公民', '歷史', '數學'], ['彈性課程',\n '地科', '數學', '數學', '資訊', '西洋影視', '國文', '國文', '英文'], ['數學', '物理', '生活科技',\n '體育', '國文', '化學', '音樂', '英文', '英文'], ['數學', '論孟選讀', '生物', '多元選修', '歷史',\n '化學', '英文', '國防', '物理'], ['彈性課程', '英文', '數學', '地理', '公民', '國文', '體育',\n '物理', '社團'], [], []]\n\n\ndef open_log():\n global log\n global fw\n try:\n fr = open(log_path, 'r')\n log = fr.read().split('\\n')\n fr.close()\n except:\n fw = open(log_path, 'w+')\n log = ''\n return\n fw = open(log_path, 'a')\n return\n\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',\n allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),\n '__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),\n '__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':\n '學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,\n 'but_login_stud': '登\\u3000\\u3000入'})\n global cook\n cook = res.cookies['ASP.NET_SessionId']\n return\n\n\ndef crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):\n send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies\n ={'ASP.NET_SessionId': cook})\n soup = BeautifulSoup(send.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n for x in range(15, 1, -1):\n try:\n num = str('')\n if x < 10:\n num = '0' + str(x)\n else:\n num = str(x)\n send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',\n cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':\n VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':\n VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':\n EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +\n '$but_vf1'): '詳細內容'})\n soup = BeautifulSoup(send.text, 'lxml')\n ok = bool(True)\n for y in range(0, len(log), 1):\n if soup.find(id='Lab_purport').text == log[y]:\n ok = bool(False)\n if ok == True:\n fw.write(soup.find(id='Lab_purport').text + '\\n')\n post_title = str('[主旨:' + str(soup.find(id='Lab_purport').\n text) + ']')\n post_content = str(soup.find(id='Lab_content').text)\n post_attachment = str(' ')\n if soup.find(target='_blank'):\n post_attachment = soup.find(target='_blank').get('href')\n send_word = (post_title + '\\n' + post_content + '\\n' +\n post_attachment)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() < 4:\n send_word = (send_word + '\\n***系統訊息***\\n' +\n tomorrow_calendar + '\\n' + tomorrow_class_table)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() == 4:\n send_word = send_word\n post(send_word)\n except:\n pass\n return\n\n\ndef crawl_tomorrow_calendar():\n res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n calendar = '明日行事曆:\\n 全校:' 
+ soup.find_all(color='#404040')[16].text\n if soup.find_all(color='#404040')[16].text == '\\xa0':\n calendar += 'N/A'\n calendar = calendar + '\\n 高一:' + soup.find_all(color='#404040')[21].text\n if soup.find_all(color='#404040')[21].text == '\\xa0':\n calendar += 'N/A'\n return calendar\n\n\ndef fetch_tomorrow_class_table():\n count = int(0)\n tomorrow_class = '\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday() + 1) % 7]:\n if count == 4:\n tomorrow_class += '\\n 下午:\\n '\n tomorrow_class += '[' + i + ']'\n if count < 8 and count != 3:\n tomorrow_class += '->'\n count += 1\n return tomorrow_class\n\n\ndef post(send_word):\n if platform == 'line':\n line_bot_api.push_message(chatid, TextSendMessage(text=send_word,\n wrap=True))\n if platform == 'telegram':\n requests.get('https://api.telegram.org/bot' + bottoken +\n '/sendMessage?chat_id=' + chatid + '&text=' + send_word)\n\n\n<mask token>\n\n\ndef close_log():\n fw.close()\n\n\ndef main():\n open_log()\n login_homework()\n crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),\n fetch_tomorrow_class_table())\n close_log()\n if datetime.today().weekday() == 6 and datetime.today(\n ).hour == 21 and datetime.today().minute < 10:\n send_word = '[主旨:機器人訊息]\\n***系統訊息***\\n' + crawl_tomorrow_calendar(\n ) + '\\n' + fetch_tomorrow_class_table()\n post(send_word)\n\n\nmain()\n",
"step-5": "# !/usr/bin/python \n# coding:utf-8 \nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import datetime\n\n#紀錄檔PATH(建議絕對位置)\nlog_path='./log.txt'\n\n#登入聯絡簿的個資\nsid=''#學號(Ex. 10731187)\ncid=''#生份證號(Ex. A123456789)\nbir=''#生日(Ex. 2000/1/1)\n\n#line or telegram module\n\n#platform='telegram'\nplatform='line'\n\nif platform=='line':\n from linebot import LineBotApi\n from linebot.models import TextSendMessage\n #line api token\n bottoken=''\n #line chat id\n chatid=''\n\n line_bot_api = LineBotApi(bottoken)\n\nif platform=='telegram':\n #telegram bot token\n bottoken=''\n #telegram group chat id\n chatid=''\n\n#課表\ncls=[['學校活動','英文','化學','國文','地理','生物','公民','歷史','數學'],\n ['彈性課程','地科','數學','數學','資訊','西洋影視','國文','國文','英文'],\n ['數學','物理','生活科技','體育','國文','化學','音樂','英文','英文'],\n ['數學','論孟選讀','生物','多元選修','歷史','化學','英文','國防','物理'],\n ['彈性課程','英文','數學','地理','公民','國文','體育','物理','社團'],[],[]]\n\ndef open_log():\n global log\n global fw\n try:\n fr = open(log_path, \"r\")\n log=fr.read().split('\\n')\n fr.close()\n except:\n fw = open(log_path, \"w+\")\n log=''\n return\n fw = open(log_path, \"a\")\n return\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, \"lxml\")\n VIEWSTATE=soup.find(id=\"__VIEWSTATE\")\n VIEWSTATEGENERATOR=soup.find(id=\"__VIEWSTATEGENERATOR\")\n EVENTVALIDATION=soup.find(id=\"__EVENTVALIDATION\")\n res=requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx', allow_redirects=False, data = {'__VIEWSTATE':VIEWSTATE.get('value'),'__VIEWSTATEGENERATOR':VIEWSTATEGENERATOR.get('value'),'__EVENTVALIDATION':EVENTVALIDATION.get('value'),'chk_id':'學生/家長','tbx_sno':sid,'tbx_sid':cid,'tbx_sbir':bir,'but_login_stud':'登 入'})\n global cook\n cook=res.cookies['ASP.NET_SessionId']\n return\n\ndef crawl_and_fetch_today_homework(tomorrow_calendar,tomorrow_class_table):\n send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',cookies={'ASP.NET_SessionId':cook})\n soup = BeautifulSoup(send.text, \"lxml\")\n VIEWSTATE=soup.find(id=\"__VIEWSTATE\")\n VIEWSTATEGENERATOR=soup.find(id=\"__VIEWSTATEGENERATOR\")\n EVENTVALIDATION=soup.find(id=\"__EVENTVALIDATION\")\n for x in range(15,1,-1):#第一頁1~15則\n try:#用try怕有頁面沒15則post\n #數字轉文字\n num=str('')\n if(x<10):\n num='0'+str(x)\n else:\n num=str(x)\n #爬內文\n send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',cookies={'ASP.NET_SessionId':cook}, data = {'__VIEWSTATE':VIEWSTATE.get('value'),'__VIEWSTATEGENERATOR':VIEWSTATEGENERATOR.get('value'),'__EVENTVALIDATION':EVENTVALIDATION.get('value'),('GridViewS$ctl'+num+'$but_vf1'):'詳細內容'})\n soup = BeautifulSoup(send.text, \"lxml\")\n #檢查市否已發過\n ok=bool(True)\n for y in range(0,len(log),1):\n if soup.find(id='Lab_purport').text==log[y]:\n ok=bool(False)\n if ok==True:#沒發過\n fw.write(soup.find(id='Lab_purport').text+'\\n')\n post_title=str('[主旨:'+str(soup.find(id='Lab_purport').text)+']')\n post_content=str(soup.find(id='Lab_content').text)\n post_attachment=str(' ')\n if(soup.find(target='_blank')):\n post_attachment=soup.find(target='_blank').get('href')\n send_word=post_title+'\\n'+post_content+'\\n'+post_attachment\n if(str(soup.find(id='Lab_purport').text).find('聯絡簿')>=0 and datetime.today().weekday()<4):\n send_word=send_word+'\\n***系統訊息***\\n'+tomorrow_calendar+'\\n'+tomorrow_class_table\n if(str(soup.find(id='Lab_purport').text).find('聯絡簿')>=0 and datetime.today().weekday() == 4 ):\n send_word=send_word\n post(send_word)\n except:\n pass\n return\n\ndef crawl_tomorrow_calendar():\n res = 
requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, \"lxml\")\n calendar='明日行事曆:\\n 全校:'+soup.find_all(color=\"#404040\")[16].text\n if(soup.find_all(color=\"#404040\")[16].text==' '):\n calendar+='N/A'\n calendar=calendar+'\\n 高一:'+soup.find_all(color=\"#404040\")[21].text\n if(soup.find_all(color=\"#404040\")[21].text==' '):\n calendar+='N/A'\n return calendar\n\ndef fetch_tomorrow_class_table():\n count=int(0)\n tomorrow_class='\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday()+1)%7]:\n if(count==4):\n tomorrow_class+='\\n 下午:\\n '\n tomorrow_class+='['+i+']'\n if(count<8 and count!=3):\n tomorrow_class+='->'\n count+=1\n return tomorrow_class\n\ndef post(send_word):\n if platform=='line':\n line_bot_api.push_message(chatid,TextSendMessage(text=send_word,wrap=True))\n if platform=='telegram':\n requests.get(\"https://api.telegram.org/bot\"+bottoken+\"/sendMessage?chat_id=\"+chatid+\"&text=\"+send_word)\n'''\n\n!!!contact [email protected] for this function!!!\n\ndef crawl_message_board():\n res = requests.get('http://59.120.227.144:11300/line/api.php')\n soup = BeautifulSoup(res.text, \"lxml\")\n message_board = soup.find_all('td')\n message='\\n\\n留言板( http://59.120.227.144:11300/line/ ) : \\n'\n for i in range(0,len(message_board),3):\n message=message+'第'+str(int((i/3)+1))+'則:\\n-'+message_board[i+1].text+\"\\n--來自:\"+message_board[i+2].text+'\\n'\n return message\n'''\n\ndef close_log():\n fw.close()\n\ndef main():\n open_log()\n login_homework()\n crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),fetch_tomorrow_class_table())\n close_log()\n\n #星期天提醒明天要上課\n if(datetime.today().weekday()==6 and datetime.today().hour == 21 and datetime.today().minute<10):\n send_word='[主旨:機器人訊息]\\n***系統訊息***\\n'+crawl_tomorrow_calendar()+'\\n'+fetch_tomorrow_class_table()\n post(send_word)\nmain()",
"step-ids": [
5,
8,
10,
11,
12
]
}
|
[
5,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
_BASE_REPRESENTATIONS = ["Primitive(field='f1', op='eq', value='value')",
"Primitive(field='f1', op='eq', value=42)",
"Primitive(field='f1', op='eq', value=3.14)",
"Primitive(field='f1', op='eq', value=True)",
"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])"
,
"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])"
,
"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])"
,
"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])"
,
"Condition(op=Operator.OR, values=[Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value=50), Primitive(field='f2', op='eq', value='aaa')]), Primitive(field='f2', op='eq', value='bbb')])"
]
REPRESENTATIONS = _BASE_REPRESENTATIONS + [val.replace("field='f1', op='eq",
"field='f1', op='gt").replace("field='f2', op='eq'",
"field='f2', op='match'") for val in _BASE_REPRESENTATIONS]
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
# pylint: disable=line-too-long
_BASE_REPRESENTATIONS = [
"Primitive(field='f1', op='eq', value='value')",
"Primitive(field='f1', op='eq', value=42)",
"Primitive(field='f1', op='eq', value=3.14)",
"Primitive(field='f1', op='eq', value=True)",
"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])",
"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])",
"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])",
"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])",
"Condition(op=Operator.OR, values=[Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value=50), Primitive(field='f2', op='eq', value='aaa')]), Primitive(field='f2', op='eq', value='bbb')])",
]
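# Extend the base set with the same expressions using 'gt' on f1 and 'match' on f2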
REPRESENTATIONS = _BASE_REPRESENTATIONS + [
val.replace(
"field='f1', op='eq", "field='f1', op='gt"
).replace(
"field='f2', op='eq'", "field='f2', op='match'"
)
for val in _BASE_REPRESENTATIONS
]
|
flexible
|
{
"blob_id": "137842d50355563b2df6c2fc48864c01a22afa80",
"index": 5567,
"step-1": "<mask token>\n",
"step-2": "_BASE_REPRESENTATIONS = [\"Primitive(field='f1', op='eq', value='value')\",\n \"Primitive(field='f1', op='eq', value=42)\",\n \"Primitive(field='f1', op='eq', value=3.14)\",\n \"Primitive(field='f1', op='eq', value=True)\",\n \"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])\"\n ,\n \"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])\"\n ,\n \"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])\"\n ,\n \"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])\"\n ,\n \"Condition(op=Operator.OR, values=[Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value=50), Primitive(field='f2', op='eq', value='aaa')]), Primitive(field='f2', op='eq', value='bbb')])\"\n ]\nREPRESENTATIONS = _BASE_REPRESENTATIONS + [val.replace(\"field='f1', op='eq\",\n \"field='f1', op='gt\").replace(\"field='f2', op='eq'\",\n \"field='f2', op='match'\") for val in _BASE_REPRESENTATIONS]\n",
"step-3": "# -*- coding:utf-8 -*-\n# pylint: disable=line-too-long\n\n_BASE_REPRESENTATIONS = [\n \"Primitive(field='f1', op='eq', value='value')\",\n \"Primitive(field='f1', op='eq', value=42)\",\n \"Primitive(field='f1', op='eq', value=3.14)\",\n \"Primitive(field='f1', op='eq', value=True)\",\n \"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])\",\n \"Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])\",\n \"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])\",\n \"Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])\",\n \"Condition(op=Operator.OR, values=[Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value=50), Primitive(field='f2', op='eq', value='aaa')]), Primitive(field='f2', op='eq', value='bbb')])\",\n]\n\nREPRESENTATIONS = _BASE_REPRESENTATIONS + [\n val.replace(\n \"field='f1', op='eq\", \"field='f1', op='gt\"\n ).replace(\n \"field='f2', op='eq'\", \"field='f2', op='match'\"\n )\n for val in _BASE_REPRESENTATIONS\n]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
import glob
import argparse
from model.NewModel import runModel
from collections import namedtuple
import csv
OutputFile = "./HealthSimOutputSheet.csv"
parser = argparse.ArgumentParser(description='Select policy file')
parser.add_argument('-p', type=str, default='default', help='name of a policy file')
parser.add_argument('-n', type=int, default=100000, help='number of patients')
args = parser.parse_args()
NumPatients = args.n
policyName = args.p
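# Resolve the policy name to exactly one file under ./policies (prefix match)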
matchingPolicies = glob.glob(f"./policies/{policyName}*")
if len(matchingPolicies) == 0:
raise SystemExit(f"No matching policy named {policyName}")
elif len(matchingPolicies) > 1:
raise SystemExit(f"Multiple matching policies for {policyName}: {matchingPolicies}")
policyFile = matchingPolicies[0]
with open(policyFile, 'r') as stream:
# magic to turn json into an object instead of a dict
# https://stackoverflow.com/a/15882054
policySettings = json.load(stream, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
results = runModel(policySettings, NumPatients)
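# Write the run-level summary rows, then one row per simulated patient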
with open(OutputFile, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
keys = ["Number on Private Insurance:", "Number on Medicare:",
"Number on Medicaid:", "Number of Uninsured:",
"Private Premium:", "Medicare Premium:",
"Medicare Funds:", "Medicaid Funds:"]
for key in keys:
row = [key] + results['runSummary'][key]
writer.writerow(row)
patients = results['patients']
writer.writerow(["Patient ID", "Age", "Ethnicity", "Gender", "Education", "Income", "Income Bracket", "QALY", "Diabetes", "Diagnosed", "Controlled", "Deceased"])
for m in range(len(patients)):
writer.writerow([m, patients[m].age, patients[m].ethnicity, patients[m].gender, patients[m].education, patients[m].income, patients[m].IPR, patients[m].QALY, patients[m].diabetes, patients[m].diagnosed, patients[m].controlled, patients[m].deceased])
|
normal
|
{
"blob_id": "894ce07c6443208483be2d3ef1409f12f24d99f3",
"index": 2852,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-p', type=str, default='default', help=\n 'name of a a policy file')\nparser.add_argument('-n', type=int, default=100000, help='number of patients')\n<mask token>\nif len(matchingPolicies) == 0:\n raise SystemExit(f'No matching policy named {policyName}')\nelif len(matchingPolicies) > 1:\n raise SystemExit(\n f'Multiple matching policies for {policyName}: {matchingPolicies}')\n<mask token>\nwith open(policyFile, 'r') as stream:\n policySettings = json.load(stream, object_hook=lambda d: namedtuple('X',\n d.keys())(*d.values()))\n<mask token>\nwith open(OutputFile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n keys = ['Number on Private Insurance:', 'Number on Medicare:',\n 'Number on Medicaid:', 'Number of Uninsured:', 'Private Premium:',\n 'Medicare Premium:', 'Medicare Funds:', 'Medicaid Funds:']\n for key in keys:\n row = [key] + results['runSummary'][key]\n writer.writerow(row)\n patients = results['patients']\n writer.writerow(['Patient ID', 'Age', 'Ethnicity', 'Gender',\n 'Education', 'Income', 'Income Bracket', 'QALY', 'Diabetes',\n 'Diagnosed', 'Controlled', 'Deceased'])\n for m in range(len(patients)):\n writer.writerow([m, patients[m].age, patients[m].ethnicity,\n patients[m].gender, patients[m].education, patients[m].income,\n patients[m].IPR, patients[m].QALY, patients[m].diabetes,\n patients[m].diagnosed, patients[m].controlled, patients[m].\n deceased])\n",
"step-3": "<mask token>\nOutputFile = './HealthSimOutputSheet.csv'\nparser = argparse.ArgumentParser(description='Select policy file')\nparser.add_argument('-p', type=str, default='default', help=\n 'name of a a policy file')\nparser.add_argument('-n', type=int, default=100000, help='number of patients')\nargs = parser.parse_args()\nNumPatients = args.n\npolicyName = args.p\nmatchingPolicies = glob.glob(f'./policies/{policyName}*')\nif len(matchingPolicies) == 0:\n raise SystemExit(f'No matching policy named {policyName}')\nelif len(matchingPolicies) > 1:\n raise SystemExit(\n f'Multiple matching policies for {policyName}: {matchingPolicies}')\npolicyFile = matchingPolicies[0]\nwith open(policyFile, 'r') as stream:\n policySettings = json.load(stream, object_hook=lambda d: namedtuple('X',\n d.keys())(*d.values()))\nresults = runModel(policySettings, NumPatients)\nwith open(OutputFile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n keys = ['Number on Private Insurance:', 'Number on Medicare:',\n 'Number on Medicaid:', 'Number of Uninsured:', 'Private Premium:',\n 'Medicare Premium:', 'Medicare Funds:', 'Medicaid Funds:']\n for key in keys:\n row = [key] + results['runSummary'][key]\n writer.writerow(row)\n patients = results['patients']\n writer.writerow(['Patient ID', 'Age', 'Ethnicity', 'Gender',\n 'Education', 'Income', 'Income Bracket', 'QALY', 'Diabetes',\n 'Diagnosed', 'Controlled', 'Deceased'])\n for m in range(len(patients)):\n writer.writerow([m, patients[m].age, patients[m].ethnicity,\n patients[m].gender, patients[m].education, patients[m].income,\n patients[m].IPR, patients[m].QALY, patients[m].diabetes,\n patients[m].diagnosed, patients[m].controlled, patients[m].\n deceased])\n",
"step-4": "import json\nimport glob\nimport argparse\nfrom model.NewModel import runModel\nfrom collections import namedtuple\nimport csv\nOutputFile = './HealthSimOutputSheet.csv'\nparser = argparse.ArgumentParser(description='Select policy file')\nparser.add_argument('-p', type=str, default='default', help=\n 'name of a a policy file')\nparser.add_argument('-n', type=int, default=100000, help='number of patients')\nargs = parser.parse_args()\nNumPatients = args.n\npolicyName = args.p\nmatchingPolicies = glob.glob(f'./policies/{policyName}*')\nif len(matchingPolicies) == 0:\n raise SystemExit(f'No matching policy named {policyName}')\nelif len(matchingPolicies) > 1:\n raise SystemExit(\n f'Multiple matching policies for {policyName}: {matchingPolicies}')\npolicyFile = matchingPolicies[0]\nwith open(policyFile, 'r') as stream:\n policySettings = json.load(stream, object_hook=lambda d: namedtuple('X',\n d.keys())(*d.values()))\nresults = runModel(policySettings, NumPatients)\nwith open(OutputFile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n keys = ['Number on Private Insurance:', 'Number on Medicare:',\n 'Number on Medicaid:', 'Number of Uninsured:', 'Private Premium:',\n 'Medicare Premium:', 'Medicare Funds:', 'Medicaid Funds:']\n for key in keys:\n row = [key] + results['runSummary'][key]\n writer.writerow(row)\n patients = results['patients']\n writer.writerow(['Patient ID', 'Age', 'Ethnicity', 'Gender',\n 'Education', 'Income', 'Income Bracket', 'QALY', 'Diabetes',\n 'Diagnosed', 'Controlled', 'Deceased'])\n for m in range(len(patients)):\n writer.writerow([m, patients[m].age, patients[m].ethnicity,\n patients[m].gender, patients[m].education, patients[m].income,\n patients[m].IPR, patients[m].QALY, patients[m].diabetes,\n patients[m].diagnosed, patients[m].controlled, patients[m].\n deceased])\n",
"step-5": "import json\nimport glob\nimport argparse\nfrom model.NewModel import runModel\nfrom collections import namedtuple\nimport csv\n\nOutputFile = \"./HealthSimOutputSheet.csv\"\n\nparser = argparse.ArgumentParser(description='Select policy file')\nparser.add_argument('-p', type=str, default='default', help='name of a a policy file')\nparser.add_argument('-n', type=int, default=100000, help='number of patients')\n\nargs = parser.parse_args()\n\nNumPatients = args.n\n\npolicyName = args.p\nmatchingPolicies = glob.glob(f\"./policies/{policyName}*\")\n\nif len(matchingPolicies) == 0:\n raise SystemExit(f\"No matching policy named {policyName}\")\nelif len(matchingPolicies) > 1:\n raise SystemExit(f\"Multiple matching policies for {policyName}: {matchingPolicies}\")\n\npolicyFile = matchingPolicies[0]\n\nwith open(policyFile, 'r') as stream:\n # magic to turn json into an object instead of a dict\n # https://stackoverflow.com/a/15882054\n policySettings = json.load(stream, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n\n\nresults = runModel(policySettings, NumPatients)\n\nwith open(OutputFile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n keys = [\"Number on Private Insurance:\", \"Number on Medicare:\",\n \"Number on Medicaid:\", \"Number of Uninsured:\",\n \"Private Premium:\", \"Medicare Premium:\",\n \"Medicare Funds:\", \"Medicaid Funds:\"]\n\n for key in keys:\n row = [key] + results['runSummary'][key]\n writer.writerow(row)\n\n patients = results['patients']\n writer.writerow([\"Patient ID\", \"Age\", \"Ethnicity\", \"Gender\", \"Education\", \"Income\", \"Income Bracket\", \"QALY\", \"Diabetes\", \"Diagnosed\", \"Controlled\", \"Deceased\"])\n for m in range(len(patients)):\n writer.writerow([m, patients[m].age, patients[m].ethnicity, patients[m].gender, patients[m].education, patients[m].income, patients[m].IPR, patients[m].QALY, patients[m].diabetes, patients[m].diagnosed, patients[m].controlled, patients[m].deceased])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
import pprint
import requests
import string
import subprocess
#Create three files
f_arptable = open( 'arptable', 'w+' )
f_maclist = open( 'maclist', 'w+' )
f_maclookup = open( 'maclookup', 'w+' )
#Give write permissions the three files
subprocess.call([ 'chmod','+w','maclist' ])
subprocess.call([ 'chmod','+w','arptable' ])
subprocess.call([ 'chmod','+w','maclookup' ])
#cols = subprocess.Popen(["arp","-a"],stdout=f)
#Run an arp -a command and write the output to the arptable file;
#wait for the command to finish so the file is complete before reading it
subprocess.Popen(['arp','-a'],stdout=f_arptable).wait()
f_arptable.flush()
#Vendor lookup endpoint; the company name for each MAC is fetched
#inside the loop below and saved in the variable devmon
maclookup_url = 'http://macvendors.co/api/%s'
#Pull the IP and MAC from the arptable file and put them in the
#maclist file along with the value from devmon
for line in open('arptable'):
    if line.startswith('?'):
        ips = line.split()[1]
        macs = line.split()[3]
        #Look up the manufacturer for this MAC; this assumes the
        #macvendors.co API returns JSON like {"result": {"company": ...}}
        req = requests.get(maclookup_url % macs)
        devmon = req.json().get('result', {}).get('company', 'unknown')
        f_maclist.write('\nIP Address: ' + ips + '\nMAC: ' + macs +
            '\nDevice Manufacturer: ' + devmon + '\n' )
subprocess.Popen(['cat','maclist'])
#print("Phase 1 complete")
#with open('maclist') as fp:
# for line in fp:
# #line.getline(1)
# #mac_field = line.split(':')
# print('line'+"\n")
|
normal
|
{
"blob_id": "d566104b00ffd5f08c564ed554e0d71279a93047",
"index": 6394,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsubprocess.call(['chmod', '+w', 'maclist'])\nsubprocess.call(['chmod', '+w', 'arptable'])\nsubprocess.call(['chmod', '+w', 'maclookup'])\nsubprocess.Popen(['arp', '-a'], stdout=f_arptable)\n<mask token>\nfor line in open('arptable'):\n if line.startswith('?'):\n ips = line.split()[1]\n macs = line.split()[3]\n f_maclist.write('\\nIP Address: ' + ips + '\\nMAC: ' + macs +\n '\\nDevice Manufacturer: ' + devmon + '\\n')\nsubprocess.Popen(['cat', 'maclist'])\n",
"step-3": "<mask token>\nf_arptable = open('arptable', 'w+')\nf_maclist = open('maclist', 'w+')\nf_maclookup = open('maclookup', 'w+')\nsubprocess.call(['chmod', '+w', 'maclist'])\nsubprocess.call(['chmod', '+w', 'arptable'])\nsubprocess.call(['chmod', '+w', 'maclookup'])\nsubprocess.Popen(['arp', '-a'], stdout=f_arptable)\nmaclookup_url = 'http://macvendors.co/api%s'\nreq = requests.get(maclookup_url % 'macs')\nreq_result = pprint.pprint(req.json())\nfor line in open('arptable'):\n if line.startswith('?'):\n ips = line.split()[1]\n macs = line.split()[3]\n f_maclist.write('\\nIP Address: ' + ips + '\\nMAC: ' + macs +\n '\\nDevice Manufacturer: ' + devmon + '\\n')\nsubprocess.Popen(['cat', 'maclist'])\n",
"step-4": "import pprint\nimport requests\nimport string\nimport subprocess\nf_arptable = open('arptable', 'w+')\nf_maclist = open('maclist', 'w+')\nf_maclookup = open('maclookup', 'w+')\nsubprocess.call(['chmod', '+w', 'maclist'])\nsubprocess.call(['chmod', '+w', 'arptable'])\nsubprocess.call(['chmod', '+w', 'maclookup'])\nsubprocess.Popen(['arp', '-a'], stdout=f_arptable)\nmaclookup_url = 'http://macvendors.co/api%s'\nreq = requests.get(maclookup_url % 'macs')\nreq_result = pprint.pprint(req.json())\nfor line in open('arptable'):\n if line.startswith('?'):\n ips = line.split()[1]\n macs = line.split()[3]\n f_maclist.write('\\nIP Address: ' + ips + '\\nMAC: ' + macs +\n '\\nDevice Manufacturer: ' + devmon + '\\n')\nsubprocess.Popen(['cat', 'maclist'])\n",
"step-5": "#!/usr/bin/python\n\nimport pprint\nimport requests\nimport string \nimport subprocess\n\n#Create three files\nf_arptable = open( 'arptable', 'w+' )\nf_maclist = open( 'maclist', 'w+' )\nf_maclookup = open( 'maclookup', 'w+' )\n\n#Give write permissions the three files\nsubprocess.call([ 'chmod','+w','maclist' ])\nsubprocess.call([ 'chmod','+w','arptable' ])\nsubprocess.call([ 'chmod','+w','maclookup' ])\n\n#cols = subprocess.Popen([\"arp\",\"-a\"],stdout=f)\n\n#Run an arp -a command and write the output to the arptable file\nsubprocess.Popen(['arp','-a'],stdout=f_arptable)\n\n#Pull the company name from the maclookup and save the value\n#in the variable devmon\nmaclookup_url = 'http://macvendors.co/api%s'\nreq = requests.get( maclookup_url % 'macs' )\nreq_result = pprint.pprint(req.json())\n\n#Pull the IP and MAC from the arptable file and put them in the\n#maclist file along with the value from devmon\nfor line in open('arptable'):\n if line.startswith('?'):\n ips = line.split()[1]\n macs = line.split()[3]\t\n f_maclist.write('\\nIP Address: ' + ips + '\\nMAC: ' + macs +\n '\\nDevice Manufacturer: ' + devmon + '\\n' )\n\nsubprocess.Popen(['cat','maclist'])\n\n#print(\"Phase 1 complete\")\n\n#with open('maclist') as fp:\n# for line in fp:\n# #line.getline(1)\n# #mac_field = line.split(':')\n# print('line'+\"\\n\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class KernelNC:
<|reserved_special_token_0|>
def __init__(self, classes):
self.classes = classes
def compute_dist(self, X, Y):
K_x = np.dot(X, X.T).toarray()
K_y = np.dot(Y, Y.T).toarray()
K_xy = np.dot(X, Y.T).toarray()
return np.diag(K_x) - 2 * K_xy.mean(axis=1) + K_y.mean()
def predict(self, X):
dists = np.array([self.compute_dist(X, classe) for classe in self.
classes])
return dists.argmin(axis=0)
def score(self, X, y):
y__ = self.predict(X)
return 100 * (y__ == y).mean()
class MultiKerOpt:
def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=
False):
self.alpha = alpha
self.tol = tol
self.degree = degree
self.method = method
self.hide = hide
def scale(self, u, norm):
if norm == 'l1':
return u / np.sum(u)
elif norm == 'l2':
return u / np.sqrt(np.sum(u ** 2))
else:
raise Exception('l1 and l2 are the only available norms')
def bound(self, u, u_0, gamma, norm):
u__ = u - u_0
u__ = np.abs(self.scale(u__, norm) * gamma)
return u__ + u_0
def KrrIterate(self, Kernels, y, coef, weights=None):
"""
Weighted KRR iterations
"""
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
N, D = K_w.shape
if weights is None:
            c = np.linalg.solve(K_w + self.alpha * np.eye(N, D), y[:, np.newaxis])
else:
W_r = np.diag(np.sqrt(weights))
A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)
Y = np.dot(W_r, y[:, np.newaxis])
x_sol = np.linalg.solve(A, Y)
c = np.dot(W_r, x_sol)
return c
def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):
"""
KLR iterations
"""
c_old = self.KrrIterate(Kernels, y, coef)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
y_enc = 2 * y - 1
for i in range(max_iters):
m_t = np.dot(K_w, c_old)
p_t = -expit(-y_enc[:, np.newaxis] * m_t)
w_t = expit(m_t) * expit(-m_t)
z_t = m_t - p_t * y_enc[:, np.newaxis] / (w_t + 1e-05)
c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=
w_t.flatten())
if np.linalg.norm(c_new - c_old) < tol:
break
else:
c_old = c_new
return c_old
def SvmIterate(self, Kernels, y, coef):
"""
SVM Estimation
"""
nb_samples = y.shape[0]
C = 1 / (2 * self.alpha * nb_samples)
r = np.arange(nb_samples)
o = np.ones(nb_samples)
z = np.zeros(nb_samples)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
y_enc = 2 * y - 1
P = matrix(K_w.astype(float), tc='d')
q = matrix(-y_enc, tc='d')
G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[
r, r], tc='d')
h = matrix(np.r_[o * C, z], tc='d')
if self.hide:
solvers.options['show_progress'] = False
sol = solvers.qp(P, q, G, h)
c = np.ravel(sol['x'])[:, np.newaxis]
return c
def gradUpdate(self, Kernels, coef, delta):
"""
Updating Gradient
"""
K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1
)
grad = np.zeros(len(Kernels))
for m in range(len(Kernels)):
grad[m] = delta.T.dot(K_t * Kernels[m]).dot(delta)
return -self.degree * grad
def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1,
weights=None):
coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)
coef = self.bound(coef, u_0, gamma, norm)
new_coef = 0
score_prev = np.inf
for i in range(n_iter):
if self.method == 'klr':
delta = self.KlrIterate(Kernels, y, coef, tol=1e-07,
max_iters=5)
elif self.method == 'svm':
delta = self.SvmIterate(Kernels, y, coef)
else:
delta = self.KrrIterate(Kernels, y, coef, weights=weights)
grad = self.gradUpdate(Kernels, coef, delta)
new_coef = coef - step * grad
new_coef = self.bound(new_coef, u_0, gamma, norm)
score = np.linalg.norm(new_coef - coef, np.inf)
if score > score_prev:
step *= 0.9
if score < self.tol:
self.coef = coef
self.delta = delta
coef = new_coef
score_prev = score.copy()
self.coef, self.delta = coef, delta
def predict(self, Kernels):
K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** self.degree
y__ = np.sign(K_w.dot(self.delta)).flatten()
if self.method != 'krr':
y__ = 0.5 * (y__ + 1)
return y__
def score(self, Kernels, y):
y__ = self.predict(Kernels)
if self.method != 'krr':
score = 100 * (y__ == y).mean()
else:
score = np.mean((y__ - y) ** 2)
return score
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KernelNC:
"""
distance based classifier for spectrum kernels
"""
def __init__(self, classes):
self.classes = classes
def compute_dist(self, X, Y):
K_x = np.dot(X, X.T).toarray()
K_y = np.dot(Y, Y.T).toarray()
K_xy = np.dot(X, Y.T).toarray()
return np.diag(K_x) - 2 * K_xy.mean(axis=1) + K_y.mean()
def predict(self, X):
dists = np.array([self.compute_dist(X, classe) for classe in self.
classes])
return dists.argmin(axis=0)
def score(self, X, y):
y__ = self.predict(X)
return 100 * (y__ == y).mean()
class MultiKerOpt:
def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=
False):
self.alpha = alpha
self.tol = tol
self.degree = degree
self.method = method
self.hide = hide
def scale(self, u, norm):
if norm == 'l1':
return u / np.sum(u)
elif norm == 'l2':
return u / np.sqrt(np.sum(u ** 2))
else:
raise Exception('l1 and l2 are the only available norms')
def bound(self, u, u_0, gamma, norm):
u__ = u - u_0
u__ = np.abs(self.scale(u__, norm) * gamma)
return u__ + u_0
def KrrIterate(self, Kernels, y, coef, weights=None):
"""
Weighted KRR iterations
"""
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
N, D = K_w.shape
if weights is None:
            c = np.linalg.solve(K_w + self.alpha * np.eye(N, D), y[:, np.newaxis])
else:
W_r = np.diag(np.sqrt(weights))
A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)
Y = np.dot(W_r, y[:, np.newaxis])
x_sol = np.linalg.solve(A, Y)
c = np.dot(W_r, x_sol)
return c
def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):
"""
KLR iterations
"""
c_old = self.KrrIterate(Kernels, y, coef)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
y_enc = 2 * y - 1
for i in range(max_iters):
m_t = np.dot(K_w, c_old)
p_t = -expit(-y_enc[:, np.newaxis] * m_t)
w_t = expit(m_t) * expit(-m_t)
z_t = m_t - p_t * y_enc[:, np.newaxis] / (w_t + 1e-05)
c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=
w_t.flatten())
if np.linalg.norm(c_new - c_old) < tol:
break
else:
c_old = c_new
return c_old
def SvmIterate(self, Kernels, y, coef):
"""
SVM Estimation
"""
nb_samples = y.shape[0]
C = 1 / (2 * self.alpha * nb_samples)
r = np.arange(nb_samples)
o = np.ones(nb_samples)
z = np.zeros(nb_samples)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
y_enc = 2 * y - 1
P = matrix(K_w.astype(float), tc='d')
q = matrix(-y_enc, tc='d')
G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[
r, r], tc='d')
h = matrix(np.r_[o * C, z], tc='d')
if self.hide:
solvers.options['show_progress'] = False
sol = solvers.qp(P, q, G, h)
c = np.ravel(sol['x'])[:, np.newaxis]
return c
def gradUpdate(self, Kernels, coef, delta):
"""
Updating Gradient
"""
K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1
)
grad = np.zeros(len(Kernels))
for m in range(len(Kernels)):
grad[m] = delta.T.dot(K_t * Kernels[m]).dot(delta)
return -self.degree * grad
def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1,
weights=None):
coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)
coef = self.bound(coef, u_0, gamma, norm)
new_coef = 0
score_prev = np.inf
for i in range(n_iter):
if self.method == 'klr':
delta = self.KlrIterate(Kernels, y, coef, tol=1e-07,
max_iters=5)
elif self.method == 'svm':
delta = self.SvmIterate(Kernels, y, coef)
else:
delta = self.KrrIterate(Kernels, y, coef, weights=weights)
grad = self.gradUpdate(Kernels, coef, delta)
new_coef = coef - step * grad
new_coef = self.bound(new_coef, u_0, gamma, norm)
score = np.linalg.norm(new_coef - coef, np.inf)
if score > score_prev:
step *= 0.9
if score < self.tol:
self.coef = coef
self.delta = delta
coef = new_coef
score_prev = score.copy()
self.coef, self.delta = coef, delta
def predict(self, Kernels):
K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** self.degree
y__ = np.sign(K_w.dot(self.delta)).flatten()
if self.method != 'krr':
y__ = 0.5 * (y__ + 1)
return y__
def score(self, Kernels, y):
y__ = self.predict(Kernels)
if self.method != 'krr':
score = 100 * (y__ == y).mean()
else:
score = np.mean((y__ - y) ** 2)
return score
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KernelNC:
"""
distance based classifier for spectrum kernels
"""
def __init__(self, classes):
self.classes = classes
def compute_dist(self, X, Y):
K_x = np.dot(X, X.T).toarray()
K_y = np.dot(Y, Y.T).toarray()
K_xy = np.dot(X, Y.T).toarray()
return np.diag(K_x) - 2 * K_xy.mean(axis=1) + K_y.mean()
def predict(self, X):
dists = np.array([self.compute_dist(X, classe) for classe in self.
classes])
return dists.argmin(axis=0)
def score(self, X, y):
y__ = self.predict(X)
return 100 * (y__ == y).mean()
class MultiKerOpt:
def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=
False):
self.alpha = alpha
self.tol = tol
self.degree = degree
self.method = method
self.hide = hide
def scale(self, u, norm):
if norm == 'l1':
return u / np.sum(u)
elif norm == 'l2':
return u / np.sqrt(np.sum(u ** 2))
else:
raise Exception('l1 and l2 are the only available norms')
def bound(self, u, u_0, gamma, norm):
u__ = u - u_0
u__ = np.abs(self.scale(u__, norm) * gamma)
return u__ + u_0
def KrrIterate(self, Kernels, y, coef, weights=None):
"""
Weighted KRR iterations
"""
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
N, D = K_w.shape
if weights is None:
            c = np.linalg.solve(K_w + self.alpha * np.eye(N, D), y[:, np.newaxis])
else:
W_r = np.diag(np.sqrt(weights))
A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)
Y = np.dot(W_r, y[:, np.newaxis])
x_sol = np.linalg.solve(A, Y)
c = np.dot(W_r, x_sol)
return c
def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):
"""
KLR iterations
"""
c_old = self.KrrIterate(Kernels, y, coef)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
y_enc = 2 * y - 1
for i in range(max_iters):
m_t = np.dot(K_w, c_old)
p_t = -expit(-y_enc[:, np.newaxis] * m_t)
w_t = expit(m_t) * expit(-m_t)
z_t = m_t - p_t * y_enc[:, np.newaxis] / (w_t + 1e-05)
c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=
w_t.flatten())
if np.linalg.norm(c_new - c_old) < tol:
break
else:
c_old = c_new
return c_old
def SvmIterate(self, Kernels, y, coef):
"""
SVM Estimation
"""
nb_samples = y.shape[0]
C = 1 / (2 * self.alpha * nb_samples)
r = np.arange(nb_samples)
o = np.ones(nb_samples)
z = np.zeros(nb_samples)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
y_enc = 2 * y - 1
P = matrix(K_w.astype(float), tc='d')
q = matrix(-y_enc, tc='d')
G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[
r, r], tc='d')
h = matrix(np.r_[o * C, z], tc='d')
if self.hide:
solvers.options['show_progress'] = False
sol = solvers.qp(P, q, G, h)
c = np.ravel(sol['x'])[:, np.newaxis]
return c
def gradUpdate(self, Kernels, coef, delta):
"""
Updating Gradient
"""
K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1
)
grad = np.zeros(len(Kernels))
for m in range(len(Kernels)):
grad[m] = delta.T.dot(K_t * Kernels[m]).dot(delta)
return -self.degree * grad
def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1,
weights=None):
coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)
coef = self.bound(coef, u_0, gamma, norm)
new_coef = 0
score_prev = np.inf
for i in range(n_iter):
if self.method == 'klr':
delta = self.KlrIterate(Kernels, y, coef, tol=1e-07,
max_iters=5)
elif self.method == 'svm':
delta = self.SvmIterate(Kernels, y, coef)
else:
delta = self.KrrIterate(Kernels, y, coef, weights=weights)
grad = self.gradUpdate(Kernels, coef, delta)
new_coef = coef - step * grad
new_coef = self.bound(new_coef, u_0, gamma, norm)
score = np.linalg.norm(new_coef - coef, np.inf)
if score > score_prev:
step *= 0.9
if score < self.tol:
self.coef = coef
self.delta = delta
coef = new_coef
score_prev = score.copy()
self.coef, self.delta = coef, delta
def predict(self, Kernels):
K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** self.degree
y__ = np.sign(K_w.dot(self.delta)).flatten()
if self.method != 'krr':
y__ = 0.5 * (y__ + 1)
return y__
def score(self, Kernels, y):
y__ = self.predict(Kernels)
if self.method != 'krr':
score = 100 * (y__ == y).mean()
else:
score = np.mean((y__ - y) ** 2)
return score
def CvSearch(K_xx, K_yx, y, method='svm', degrees=[4], alphas=[0.01], cv=5,
n_iter=5):
tt = time.time()
n_iters = cv * len(degrees) * len(alphas)
n_samples = y.shape[0]
DEG, ALPH, TRAIN, VAL = [], [], [], []
i = 0
for degree in degrees:
for alpha in alphas:
DEG.append(degree)
ALPH.append(alpha)
INDS = np.array(range(n_samples))
idx = np.random.permutation(n_samples)
INDS = INDS[idx]
vals = np.array_split(INDS, cv)
perfs_train = []
perfs_val = []
for val in vals:
i += 1
sys.stderr.write(
'\rIteration %d/%d -- degree %d --alpha %.3f' % (i,
n_iters, degree, alpha))
sys.stderr.flush()
train = np.setdiff1d(range(n_samples), val)
clf = MultiKerOpt(alpha=alpha, tol=1e-07, degree=degree,
method=method, hide=True)
clf.fit(K_xx[:, train.reshape(-1, 1), train], y[train],
n_iter=n_iter)
score_train = clf.score(K_xx[:, train.reshape(-1, 1), train
], y[train])
score_val = clf.score(K_xx[:, val.reshape(-1, 1), train], y
[val])
perfs_train.append(score_train)
perfs_val.append(score_val)
TRAIN.append(np.mean(np.array(perfs_train)))
VAL.append(np.mean(np.array(perfs_val)))
df = pd.DataFrame({'degree': DEG, 'alpha': ALPH, 'train': TRAIN, 'val':
VAL})
tt = time.time() - tt
    print('Done in %.3f minutes' % (tt / 60))
return df
def get_best(df):
idx = np.argmax(df.val.values)
best = np.max(df.val.values)
best_degree = df.degree[idx]
best_alpha = df.alpha[idx]
return best_degree, best_alpha, best
<|reserved_special_token_1|>
from cvxopt import matrix, spmatrix, solvers
from scipy.special import expit
import numpy as np
import sys
import pandas as pd
import time
class KernelNC:
"""
distance based classifier for spectrum kernels
"""
def __init__(self, classes):
self.classes = classes
def compute_dist(self, X, Y):
K_x = np.dot(X, X.T).toarray()
K_y = np.dot(Y, Y.T).toarray()
K_xy = np.dot(X, Y.T).toarray()
return np.diag(K_x) - 2 * K_xy.mean(axis=1) + K_y.mean()
def predict(self, X):
dists = np.array([self.compute_dist(X, classe) for classe in self.
classes])
return dists.argmin(axis=0)
def score(self, X, y):
y__ = self.predict(X)
return 100 * (y__ == y).mean()
class MultiKerOpt:
def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=
False):
self.alpha = alpha
self.tol = tol
self.degree = degree
self.method = method
self.hide = hide
def scale(self, u, norm):
if norm == 'l1':
return u / np.sum(u)
elif norm == 'l2':
return u / np.sqrt(np.sum(u ** 2))
else:
raise Exception('l1 and l2 are the only available norms')
def bound(self, u, u_0, gamma, norm):
u__ = u - u_0
u__ = np.abs(self.scale(u__, norm) * gamma)
return u__ + u_0
def KrrIterate(self, Kernels, y, coef, weights=None):
"""
Weighted KRR iterations
"""
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
N, D = K_w.shape
if weights is None:
            c = np.linalg.solve(K_w + self.alpha * np.eye(N, D), y[:, np.newaxis])
else:
W_r = np.diag(np.sqrt(weights))
A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N, D)
Y = np.dot(W_r, y[:, np.newaxis])
x_sol = np.linalg.solve(A, Y)
c = np.dot(W_r, x_sol)
return c
def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):
"""
KLR iterations
"""
c_old = self.KrrIterate(Kernels, y, coef)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
y_enc = 2 * y - 1
for i in range(max_iters):
m_t = np.dot(K_w, c_old)
p_t = -expit(-y_enc[:, np.newaxis] * m_t)
w_t = expit(m_t) * expit(-m_t)
z_t = m_t - p_t * y_enc[:, np.newaxis] / (w_t + 1e-05)
c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=
w_t.flatten())
if np.linalg.norm(c_new - c_old) < tol:
break
else:
c_old = c_new
return c_old
def SvmIterate(self, Kernels, y, coef):
"""
SVM Estimation
"""
nb_samples = y.shape[0]
C = 1 / (2 * self.alpha * nb_samples)
r = np.arange(nb_samples)
o = np.ones(nb_samples)
z = np.zeros(nb_samples)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** self.degree
y_enc = 2 * y - 1
P = matrix(K_w.astype(float), tc='d')
q = matrix(-y_enc, tc='d')
G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[
r, r], tc='d')
h = matrix(np.r_[o * C, z], tc='d')
if self.hide:
solvers.options['show_progress'] = False
sol = solvers.qp(P, q, G, h)
c = np.ravel(sol['x'])[:, np.newaxis]
return c
def gradUpdate(self, Kernels, coef, delta):
"""
Updating Gradient
"""
K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree - 1
)
grad = np.zeros(len(Kernels))
for m in range(len(Kernels)):
grad[m] = delta.T.dot(K_t * Kernels[m]).dot(delta)
return -self.degree * grad
def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1,
weights=None):
coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)
coef = self.bound(coef, u_0, gamma, norm)
new_coef = 0
score_prev = np.inf
for i in range(n_iter):
if self.method == 'klr':
delta = self.KlrIterate(Kernels, y, coef, tol=1e-07,
max_iters=5)
elif self.method == 'svm':
delta = self.SvmIterate(Kernels, y, coef)
else:
delta = self.KrrIterate(Kernels, y, coef, weights=weights)
grad = self.gradUpdate(Kernels, coef, delta)
new_coef = coef - step * grad
new_coef = self.bound(new_coef, u_0, gamma, norm)
score = np.linalg.norm(new_coef - coef, np.inf)
if score > score_prev:
step *= 0.9
if score < self.tol:
self.coef = coef
self.delta = delta
coef = new_coef
score_prev = score.copy()
self.coef, self.delta = coef, delta
def predict(self, Kernels):
K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** self.degree
y__ = np.sign(K_w.dot(self.delta)).flatten()
if self.method != 'krr':
y__ = 0.5 * (y__ + 1)
return y__
def score(self, Kernels, y):
y__ = self.predict(Kernels)
if self.method != 'krr':
score = 100 * (y__ == y).mean()
else:
score = np.mean((y__ - y) ** 2)
return score
def CvSearch(K_xx, K_yx, y, method='svm', degrees=[4], alphas=[0.01], cv=5,
n_iter=5):
tt = time.time()
n_iters = cv * len(degrees) * len(alphas)
n_samples = y.shape[0]
DEG, ALPH, TRAIN, VAL = [], [], [], []
i = 0
for degree in degrees:
for alpha in alphas:
DEG.append(degree)
ALPH.append(alpha)
INDS = np.array(range(n_samples))
idx = np.random.permutation(n_samples)
INDS = INDS[idx]
vals = np.array_split(INDS, cv)
perfs_train = []
perfs_val = []
for val in vals:
i += 1
sys.stderr.write(
'\rIteration %d/%d -- degree %d --alpha %.3f' % (i,
n_iters, degree, alpha))
sys.stderr.flush()
train = np.setdiff1d(range(n_samples), val)
clf = MultiKerOpt(alpha=alpha, tol=1e-07, degree=degree,
method=method, hide=True)
clf.fit(K_xx[:, train.reshape(-1, 1), train], y[train],
n_iter=n_iter)
score_train = clf.score(K_xx[:, train.reshape(-1, 1), train
], y[train])
score_val = clf.score(K_xx[:, val.reshape(-1, 1), train], y
[val])
perfs_train.append(score_train)
perfs_val.append(score_val)
TRAIN.append(np.mean(np.array(perfs_train)))
VAL.append(np.mean(np.array(perfs_val)))
df = pd.DataFrame({'degree': DEG, 'alpha': ALPH, 'train': TRAIN, 'val':
VAL})
tt = time.time() - tt
    print('Done in %.3f minutes' % (tt / 60))
return df
def get_best(df):
idx = np.argmax(df.val.values)
best = np.max(df.val.values)
best_degree = df.degree[idx]
best_alpha = df.alpha[idx]
return best_degree, best_alpha, best
<|reserved_special_token_1|>
#import cvxopt
from cvxopt import matrix, spmatrix, solvers
#import scipy
from scipy.special import expit
import numpy as np
import sys
import pandas as pd
import time
class KernelNC():
"""
distance based classifier for spectrum kernels
"""
def __init__(self, classes):
self.classes = classes
def compute_dist(self, X, Y):
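        # Squared feature-space distance from each row of X to the mean of
        # class Y: k(x,x) - 2*mean_y k(x,y) + mean_{y,y'} k(y,y')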
K_x = np.dot(X, X.T).toarray()
K_y = np.dot(Y, Y.T).toarray()
K_xy = np.dot(X, Y.T).toarray()
return np.diag(K_x) - 2*K_xy.mean(axis=1) + K_y.mean()
def predict(self, X):
dists = np.array([self.compute_dist(X, classe) for classe in self.classes])
return dists.argmin(axis=0)
def score(self, X, y):
y__ = self.predict(X)
return 100*(y__==y).mean()
class MultiKerOpt():
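    # Multiple-kernel model: the combined kernel is the elementwise power
    # (sum_m coef_m * K_m) ** degree; fit() alternates between solving for
    # the dual coefficients (KRR/KLR/SVM) and a gradient step on the weights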
def __init__(self, alpha=0.01, tol=1e-07, degree=2, method='klr', hide=False):
self.alpha = alpha
self.tol = tol
self.degree = degree
self.method = method
self.hide = hide
def scale(self, u, norm):
if norm=='l1':
return u/np.sum(u)
elif norm=='l2':
return u / np.sqrt(np.sum(u**2))
else:
raise Exception('l1 and l2 are the only available norms')
def bound(self, u, u_0, gamma, norm):
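        # Recenter at u_0, rescale the offset to norm gamma, and take absolute
        # values so the offsets from u_0 stay non-negative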
u__ = u - u_0
u__ = np.abs(self.scale(u__, norm) * gamma)
return u__ + u_0
def KrrIterate(self, Kernels, y, coef, weights = None):
"""
Weighted KRR iterations
"""
K_w = np.sum((Kernels * coef[:, None, None]), axis=0) ** self.degree
N, D = K_w.shape
if weights is None:
            # KRR dual solve: c = (K_w + alpha*I)^{-1} y
            c = np.linalg.solve(K_w + self.alpha * np.eye(N, D), y[:, np.newaxis])
else:
W_r = np.diag(np.sqrt(weights))
A = W_r.dot(K_w).dot(W_r) + self.alpha * np.eye(N,D)
Y = np.dot(W_r, y[:, np.newaxis])
x_sol = np.linalg.solve(A, Y)
c = np.dot(W_r, x_sol)
return c
def KlrIterate(self, Kernels, y, coef, tol=1e-07, max_iters=5):
"""
KLR iterations
"""
c_old = self.KrrIterate(Kernels, y, coef)
K_w = np.sum((Kernels * coef[:, None, None]), axis=0) ** self.degree
y_enc = 2*y-1
for i in range(max_iters):
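            # IRLS update: logistic weights w_t and working response z_t,
            # followed by a weighted KRR solve; stop once c converges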
m_t = np.dot(K_w, c_old)
p_t = -expit(-y_enc[:, np.newaxis]*m_t)
w_t = expit(m_t)*expit(-m_t)
z_t = m_t - (p_t * y_enc[:, np.newaxis]) /(w_t+ 1e-05)
c_new = self.KrrIterate(Kernels, z_t.flatten(), coef, weights=w_t.flatten())
if np.linalg.norm(c_new - c_old)<tol:
break
else:
c_old = c_new
return c_old
def SvmIterate(self, Kernels, y, coef):
"""
SVM Estimation
"""
nb_samples = y.shape[0]
C = 1 / ( 2 * self.alpha * nb_samples)
r = np.arange(nb_samples)
o = np.ones(nb_samples)
z = np.zeros(nb_samples)
K_w = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree)
y_enc = 2*y-1
P = matrix(K_w.astype(float), tc='d')
q = matrix(-y_enc, tc='d')
G = spmatrix(np.r_[y_enc, -y_enc], np.r_[r, r + nb_samples], np.r_[r, r], tc='d')
h = matrix(np.r_[o * C, z], tc='d')
if self.hide:
solvers.options['show_progress'] = False
sol = solvers.qp(P, q, G, h)
c = np.ravel(sol['x'])[:,np.newaxis]
return c
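    # The QP above is the bias-free SVM dual in cvxopt's standard form:
    #   minimize    (1/2) x^T K_w x - y_enc^T x
    #   subject to  0 <= y_enc[i] * x[i] <= C   for all i
    # where x[i] plays the role of alpha[i] * y_enc[i], so K_w.dot(x) gives
    # the decision values.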
def gradUpdate(self, Kernels, coef, delta):
"""
Updating Gradient
"""
K_t = np.sum(Kernels * coef[:, None, None], axis=0) ** (self.degree-1)
grad = np.zeros(len(Kernels))
for m in range(len(Kernels)):
grad[m] = delta.T.dot((K_t * Kernels[m])).dot(delta)
return - self.degree * grad
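    # For the elementwise power K_w = (sum_m u_m K_m)^degree,
    #   d/du_m  delta^T K_w delta = degree * delta^T (K_t * K_m) delta
    # so the value returned is the gradient of -delta^T K_w delta w.r.t. u.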
def fit(self, Kernels, y, u_0=0, gamma=1, norm='l2', n_iter=5, step=1, weights=None):
coef = np.random.normal(0, 1, len(Kernels)) / len(Kernels)
coef = self.bound(coef, u_0, gamma, norm)
new_coef = 0
score_prev = np.inf
        for i in range(n_iter):
if self.method=='klr':
delta = self.KlrIterate(Kernels, y, coef, tol=1e-07, max_iters=5)
elif self.method=='svm':
delta = self.SvmIterate(Kernels, y, coef)
else:
delta = self.KrrIterate(Kernels, y, coef, weights = weights)
grad = self.gradUpdate(Kernels, coef, delta)
new_coef = coef - step * grad
new_coef = self.bound(new_coef, u_0, gamma, norm)
score = np.linalg.norm(new_coef - coef, np.inf)
if score>score_prev:
step *= 0.9
            if score < self.tol:
                # converged: keep the current solution and stop early
                self.coef = coef
                self.delta = delta
                break
coef = new_coef
score_prev = score.copy()
        self.coef, self.delta = coef, delta
def predict(self, Kernels):
K_w = np.sum(Kernels * self.coef[:, None, None], axis=0) ** (self.degree)
y__ = np.sign(K_w.dot(self.delta)).flatten()
if self.method != 'krr':
y__ = 0.5 * (y__ + 1)
return y__
def score(self, Kernels, y):
y__ = self.predict(Kernels)
if self.method!='krr':
score = 100*(y__==y).mean()
else:
score = np.mean((y__- y)**2)
return score
def CvSearch(K_xx, K_yx, y, method='svm', degrees=[4], alphas=[0.01], cv=5, n_iter=5):
tt = time.time()
n_iters = cv * len(degrees) * len(alphas)
n_samples = y.shape[0]
DEG, ALPH, TRAIN, VAL = [], [], [], []
i=0
for degree in degrees:
for alpha in alphas:
DEG.append(degree)
ALPH.append(alpha)
#SPLITTING
INDS = np.array(range(n_samples))
idx = np.random.permutation(n_samples)
INDS = INDS[idx]
vals = np.array_split(INDS, cv)
perfs_train = []
perfs_val = []
for val in vals:
i += 1
sys.stderr.write('\rIteration %d/%d -- degree %d --alpha %.3f' %(i, n_iters, degree, alpha))
sys.stderr.flush()
train = np.setdiff1d(range(n_samples),val)
clf = MultiKerOpt(alpha=alpha, tol=1e-07, degree=degree, method=method, hide=True)
clf.fit(K_xx[:,train.reshape(-1,1), train], y[train], n_iter=n_iter)
score_train = clf.score(K_xx[:,train.reshape(-1,1), train], y[train])
score_val = clf.score(K_xx[:,val.reshape(-1,1), train], y[val])
perfs_train.append(score_train)
perfs_val.append(score_val)
TRAIN.append(np.mean(np.array(perfs_train)))
VAL.append(np.mean(np.array(perfs_val)))
df = pd.DataFrame({'degree':DEG, 'alpha':ALPH, 'train':TRAIN, 'val':VAL})
tt = time.time() - tt
    print('Done in %.3f minutes' % (tt / 60))
return df
def get_best(df):
idx = np.argmax(df.val.values)
best = np.max(df.val.values)
best_degree = df.degree[idx]
best_alpha = df.alpha[idx]
return best_degree, best_alpha, best
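

# Usage sketch (illustrative, not from the original source): wiring CvSearch
# and get_best together on made-up data. K_xx is assumed to be a
# (n_kernels, n_samples, n_samples) stack of precomputed Gram matrices and y
# a binary {0, 1} label vector; K_yx is accepted by CvSearch but never used,
# so None is passed. Assumes cvxopt is installed for method='svm'.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n_samples = 40
    X_feat = rng.normal(size=(n_samples, 5))
    # three toy linear kernels at different scales (each PSD by construction)
    K_xx = np.stack([np.dot(X_feat, X_feat.T) * s for s in (0.5, 1.0, 2.0)])
    y = (X_feat[:, 0] > 0).astype(int)
    df = CvSearch(K_xx, None, y, method='svm', degrees=[1, 2],
                  alphas=[0.01, 0.1], cv=3, n_iter=2)
    best_degree, best_alpha, best = get_best(df)
    print('best degree=%s, alpha=%s, val=%.2f%%' % (best_degree, best_alpha, best))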
'''
Temperature Container
'''
class TempHolder:
    def __init__(self, in_range_start, in_range_end):
        self.range_start = in_range_start
        self.range_end = in_range_end
        self.counter = 0
        self.star_count_lst = [0, 0, 0, 0, 0, 0]
    def is_in_temp_range(self, temp):
        return self.range_start <= temp < self.range_end
    def add_rating(self, rating):
        # tally one observation; star buckets only cover ratings 0-5
        if 0 <= int(rating) <= 5:
            self.star_count_lst[int(rating)] += 1
        self.counter += 1
    def __str__(self):
        return_str = ""
        return_str += "Temp: " + str(self.range_start) + "-" + str(self.range_end) + "\n"
        return_str += "Count: " + str(self.counter) + "\n"
        for stars, count in enumerate(self.star_count_lst):
            if count == 0:
                return_str += str(stars) + " Stars: 0.00%\n"
            else:
                return_str += str(stars) + " Stars: " + "%.2f" % (100.0 * count / self.counter) + "%\n"
        return return_str
class TempAnalysis:
    def __init__(self):
        # one holder per 10-degree bucket from 0 to 110; the list must be
        # per-instance, not a shared class attribute
        self.temp_holder_lst = []
        temp_counter = 0
        while temp_counter < 110:
            self.temp_holder_lst.append(TempHolder(temp_counter, temp_counter + 10))
            temp_counter += 10
def add_rating(self, rating, temp):
for temp_holder in self.temp_holder_lst:
if temp_holder.is_in_temp_range(temp):
temp_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = "Breakdown by Temperature:\n"
return_str += "-------------------------\n"
for temp_holder in self.temp_holder_lst:
return_str += str(temp_holder) + "\n"
return return_str
'''
FRSHTT (Weather Condition Code) Container
'''
class FRSHTTHolder:
    def __init__(self, in_frshtt_code):
        self.frshtt_code = in_frshtt_code
        self.counter = 0
        self.star_count_lst = [0, 0, 0, 0, 0, 0]
    def is_in_code(self, in_frshtt_code):
        return self.frshtt_code == in_frshtt_code
    def add_rating(self, rating):
        # tally one observation; star buckets only cover ratings 0-5
        if 0 <= int(rating) <= 5:
            self.star_count_lst[int(rating)] += 1
        self.counter += 1
    def __str__(self):
        return_str = ""
        return_str += "Code: " + str(self.frshtt_code) + "\n"
        return_str += "Count: " + str(self.counter) + "\n"
        for stars, count in enumerate(self.star_count_lst):
            if count == 0:
                return_str += str(stars) + " Stars: 0.00%\n"
            else:
                return_str += str(stars) + " Stars: " + "%.2f" % (100.0 * count / self.counter) + "%\n"
        return return_str
class FRSHTTAnalysis:
    def __init__(self):
        # the holder list must be per-instance, not a shared class attribute
        self.frshtt_holder_lst = []
        # no weather
        self.frshtt_holder_lst.append(FRSHTTHolder("000000"))
        # rain
        self.frshtt_holder_lst.append(FRSHTTHolder("010000"))
        # thunderstorm
        self.frshtt_holder_lst.append(FRSHTTHolder("010010"))
        # fog
        self.frshtt_holder_lst.append(FRSHTTHolder("100000"))
        # snow
        self.frshtt_holder_lst.append(FRSHTTHolder("001000"))
        # mixed (snow/rain)
        self.frshtt_holder_lst.append(FRSHTTHolder("011000"))
        # dry thunder
        self.frshtt_holder_lst.append(FRSHTTHolder("000010"))
        # hail
        self.frshtt_holder_lst.append(FRSHTTHolder("000100"))
def add_rating(self, rating, frshtt_code):
for frshtt_holder in self.frshtt_holder_lst:
if frshtt_holder.is_in_code(frshtt_code):
frshtt_holder.add_rating(rating)
return True
return False
def __str__(self):
return_str = "Breakdown by Code:\n"
return_str += "-------------------------\n"
for frshtt_holder in self.frshtt_holder_lst:
return_str += str(frshtt_holder) + "\n"
return return_str
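

# Usage sketch (illustrative, not from the original source): feeding
# (rating, temperature, FRSHTT code) observations to both analyses and
# printing the breakdowns. The sample tuples below are made up.
if __name__ == '__main__':
    observations = [
        (5, 72.0, "000000"),
        (3, 45.5, "010000"),
        (1, 28.0, "001000"),
        (4, 95.0, "000000"),
    ]
    temp_analysis = TempAnalysis()
    frshtt_analysis = FRSHTTAnalysis()
    for rating, temp, code in observations:
        temp_analysis.add_rating(rating, temp)
        frshtt_analysis.add_rating(rating, code)
    print(temp_analysis)
    print(frshtt_analysis)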
self.frshtt_holder_lst.append(FRSHTTHolder('000010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000100'))\n\n def add_rating(self, rating, frshtt_code):\n for frshtt_holder in self.frshtt_holder_lst:\n if frshtt_holder.is_in_code(frshtt_code):\n frshtt_holder.add_rating(rating)\n return True\n return False\n\n def __str__(self):\n return_str = 'Breakdown by Code:\\n'\n return_str += '-------------------------\\n'\n for frshtt_holder in self.frshtt_holder_lst:\n return_str += str(frshtt_holder) + '\\n'\n return return_str\n",
"step-5": "'''\nTemperature Container\n'''\nclass TempHolder:\n range_start = 0\n range_end = 0\n \n star_count_lst = [0,0,0,0,0,0]\n counter = 0\n \n def __init__(self, in_range_start, in_range_end):\n self.range_start = in_range_start\n self.range_end = in_range_end\n self.counter = 0\n self.star_count_lst = [0,0,0,0,0,0]\n \n def is_in_temp_range(self, temp):\n if self.range_start <= temp and temp < self.range_end:\n return True\n else:\n return False\n \n def add_rating(self, rating):\n if int(rating) == 0:\n self.star_count_lst[0] += 1\n if int(rating) == 1:\n self.star_count_lst[1] += 1\n if int(rating) == 2:\n self.star_count_lst[2] += 1\n if int(rating) == 3:\n self.star_count_lst[3] += 1\n if int(rating) == 4:\n self.star_count_lst[4] += 1\n if int(rating) == 5:\n self.star_count_lst[5] += 1\n \n self.counter += 1\n \n def __str__(self):\n return_str = \"\"\n \n return_str += \"Temp: \" + str(self.range_start) + \"-\" + str(self.range_end) + \"\\n\"\n return_str += \"Count: \" + str(self.counter) + \"\\n\"\n\n if self.star_count_lst[0] == 0:\n return_str += \"0 Stars: 0.00%\\n\" \n else:\n return_str += \"0 Stars: \" + str(round((self.star_count_lst[0] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[1] == 0:\n return_str += \"1 Stars: 0.00%\\n\"\n else:\n return_str += \"1 Stars: \" + str(round((self.star_count_lst[1] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[2] == 0:\n return_str += \"2 Stars: 0.00%\\n\"\n else:\n return_str += \"2 Stars: \" + str(round((self.star_count_lst[2] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[3] == 0:\n return_str += \"3 Stars: 0.00%\\n\"\n else:\n return_str += \"3 Stars: \" + str(round((self.star_count_lst[3] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[4] == 0:\n return_str += \"4 Stars: 0.00%\\n\"\n else:\n return_str += \"4 Stars: \" + str(round((self.star_count_lst[4] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[5] == 0:\n return_str += \"5 Stars: 0.00%\\n\"\n else:\n return_str += \"5 Stars: \" + str(round((self.star_count_lst[5] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n return return_str\n \nclass TempAnalysis:\n temp_holder_lst = list()\n \n def __init__(self):\n temp_counter = 0\n \n while temp_counter < 110:\n self.temp_holder_lst.append(TempHolder(temp_counter, temp_counter + 10))\n temp_counter += 10\n \n def add_rating(self, rating, temp):\n for temp_holder in self.temp_holder_lst:\n if temp_holder.is_in_temp_range(temp):\n temp_holder.add_rating(rating)\n return True\n \n return False\n \n def __str__(self):\n return_str = \"Breakdown by Temperature:\\n\"\n return_str += \"-------------------------\\n\"\n \n for temp_holder in self.temp_holder_lst:\n return_str += str(temp_holder) + \"\\n\"\n \n return return_str\n \n \n'''\nTemperature Container\n'''\nclass FRSHTTHolder:\n frshtt_code = \"\"\n \n star_count_lst = [0,0,0,0,0,0]\n counter = 0\n \n def __init__(self, in_frshtt_code):\n self.frshtt_code = in_frshtt_code\n self.counter = 0\n self.star_count_lst = [0,0,0,0,0,0]\n \n def is_in_code(self, in_frshtt_code):\n if self.frshtt_code == in_frshtt_code:\n return True\n else:\n return False\n \n def add_rating(self, rating):\n if int(rating) == 0:\n self.star_count_lst[0] += 1\n if int(rating) == 1:\n self.star_count_lst[1] += 1\n if int(rating) == 2:\n self.star_count_lst[2] += 1\n if int(rating) == 3:\n self.star_count_lst[3] += 1\n if int(rating) == 4:\n self.star_count_lst[4] += 
1\n if int(rating) == 5:\n self.star_count_lst[5] += 1\n \n self.counter += 1\n \n def __str__(self):\n return_str = \"\"\n \n return_str += \"Code: \" + str(self.frshtt_code) + \"\\n\"\n return_str += \"Count: \" + str(self.counter) + \"\\n\"\n\n if self.star_count_lst[0] == 0:\n return_str += \"0 Stars: 0.00%\\n\" \n else:\n return_str += \"0 Stars: \" + str(round((self.star_count_lst[0] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[1] == 0:\n return_str += \"1 Stars: 0.00%\\n\"\n else:\n return_str += \"1 Stars: \" + str(round((self.star_count_lst[1] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[2] == 0:\n return_str += \"2 Stars: 0.00%\\n\"\n else:\n return_str += \"2 Stars: \" + str(round((self.star_count_lst[2] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[3] == 0:\n return_str += \"3 Stars: 0.00%\\n\"\n else:\n return_str += \"3 Stars: \" + str(round((self.star_count_lst[3] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[4] == 0:\n return_str += \"4 Stars: 0.00%\\n\"\n else:\n return_str += \"4 Stars: \" + str(round((self.star_count_lst[4] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[5] == 0:\n return_str += \"5 Stars: 0.00%\\n\"\n else:\n return_str += \"5 Stars: \" + str(round((self.star_count_lst[5] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n return return_str\n \nclass FRSHTTAnalysis:\n frshtt_holder_lst = list()\n \n def __init__(self):\n # no weather\n self.frshtt_holder_lst.append(FRSHTTHolder(\"000000\"))\n # rain\n self.frshtt_holder_lst.append(FRSHTTHolder(\"010000\"))\n # thunder strom\n self.frshtt_holder_lst.append(FRSHTTHolder(\"010010\"))\n # fog\n self.frshtt_holder_lst.append(FRSHTTHolder(\"100000\"))\n # snow\n self.frshtt_holder_lst.append(FRSHTTHolder(\"001000\"))\n # mixed (snow/rain)\n self.frshtt_holder_lst.append(FRSHTTHolder(\"011000\"))\n # dry thunder\n self.frshtt_holder_lst.append(FRSHTTHolder(\"000010\"))\n # hail\n self.frshtt_holder_lst.append(FRSHTTHolder(\"000100\"))\n \n def add_rating(self, rating, frshtt_code):\n for frshtt_holder in self.frshtt_holder_lst:\n if frshtt_holder.is_in_code(frshtt_code):\n frshtt_holder.add_rating(rating)\n return True\n \n return False\n \n def __str__(self):\n return_str = \"Breakdown by Code:\\n\"\n return_str += \"-------------------------\\n\"\n \n for frshtt_holder in self.frshtt_holder_lst:\n return_str += str(frshtt_holder) + \"\\n\"\n \n return return_str\n",
"step-ids": [
11,
13,
15,
19,
23
]
}
|
[
11,
13,
15,
19,
23
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def a():
lines = 0
words = 0
letters = 0
for line in open(f'{text}.txt', 'r'):
lines += 1
letters += len(line.strip('.,:-()!?;)"\'\n}'))
words += len(line.split())
return f'Lines = {lines}, words = {words}, letters = {letters}'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def a():
lines = 0
words = 0
letters = 0
for line in open(f'{text}.txt', 'r'):
lines += 1
letters += len(line.strip('.,:-()!?;)"\'\n}'))
words += len(line.split())
return f'Lines = {lines}, words = {words}, letters = {letters}'
print(a())
<|reserved_special_token_1|>
text = input('Enter the file name: ')
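# Report the number of lines, words and characters in <text>.txt.
# Note: str.strip() removes the listed characters only from the ends of each
# line, so the character count still includes spaces and inner punctuation.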
def a():
lines = 0
words = 0
letters = 0
for line in open(f'{text}.txt', 'r'):
lines += 1
letters += len(line.strip('.,:-()!?;)"\'\n}'))
words += len(line.split())
return f'Lines = {lines}, words = {words}, letters = {letters}'
print(a())
|
flexible
|
{
"blob_id": "2a65287588fe1337ba1a6f7c2e15e0505611d739",
"index": 2228,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef a():\n lines = 0\n words = 0\n letters = 0\n for line in open(f'{text}.txt', 'r'):\n lines += 1\n letters += len(line.strip('.,:-()!?;)\"\\'\\n}'))\n words += len(line.split())\n return f'Lines = {lines}, words = {words}, letters = {letters}'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef a():\n lines = 0\n words = 0\n letters = 0\n for line in open(f'{text}.txt', 'r'):\n lines += 1\n letters += len(line.strip('.,:-()!?;)\"\\'\\n}'))\n words += len(line.split())\n return f'Lines = {lines}, words = {words}, letters = {letters}'\n\n\nprint(a())\n",
"step-4": "text = input('Ввести имя файла: ')\n\n\ndef a():\n lines = 0\n words = 0\n letters = 0\n for line in open(f'{text}.txt', 'r'):\n lines += 1\n letters += len(line.strip('.,:-()!?;)\"\\'\\n}'))\n words += len(line.split())\n return f'Lines = {lines}, words = {words}, letters = {letters}'\n\n\nprint(a())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(2, n + 1):
c = a + b
list.append(c)
a, b = b, c
print(n, 'th Fibonacci number is ', list[n])
<|reserved_special_token_1|>
n = int(input('Enter a number: '))
c = 0
a, b = 0, 1
list = [a, b]
for i in range(2, n + 1):
c = a + b
list.append(c)
a, b = b, c
print(n, 'th Fibonacci number is ', list[n])
<|reserved_special_token_1|>
#day11
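# Iterative Fibonacci: build the sequence up to index n, then print the n-th term.
# (The name `list` shadows the built-in list type; kept here as in the original.)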
n = int(input("Enter a number: "))
c = 0
a,b = 0, 1
list = [a, b]
for i in range(2,n+1):
c = a+b
list.append(c)
a,b = b, c
print(n,"th fibonacci number is ",list[n])
|
flexible
|
{
"blob_id": "255cdbce1f9f7709165b1a29362026ad92ba4712",
"index": 2303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2, n + 1):\n c = a + b\n list.append(c)\n a, b = b, c\nprint(n, 'th fibonacci number is ', list[n])\n",
"step-3": "n = int(input('Enter a number: '))\nc = 0\na, b = 0, 1\nlist = [a, b]\nfor i in range(2, n + 1):\n c = a + b\n list.append(c)\n a, b = b, c\nprint(n, 'th fibonacci number is ', list[n])\n",
"step-4": "#day11\nn = int(input(\"Enter a number: \"))\nc = 0\na,b = 0, 1\nlist = [a, b]\nfor i in range(2,n+1):\n c = a+b\n list.append(c)\n a,b = b, c\nprint(n,\"th fibonacci number is \",list[n])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from pyplasm import *
doorY = [.2,.18,.08,.18,.08,.18,.4,.18,.08,.18,.08,.18,.2]
doorX = [.2,.5,.2,1.8,.08,.18,.08,.18,.2]
doorOccurrency = [[True]*13,
[True, False, True, False, True, False, True, False, True, False, True, False, True],
[True]*13,
[True, False, True, False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, True, True, True, True, False, True, False, True],
[True, False, True, False, False, False, True, False, False, False, True, False, True],
[True, False, True, True, True, True, True, True, True, True, True, False, True],
[True, False, False, False, False, False, True, False, False, False, False, False, True],
[True]*13]
windowY = [0.04,0.04,0.2,0.02,0.16,0.02,0.2,0.04,0.04]
windowX = [0.02,0.8,0.05,0.02,0.4,0.02,0.4,0.05,0.04]
windowOccurrency = [[True]*9,
[True, False, False, False, False, False, False, False, True],
[True]*9,
[True]*9,
[True, True, False, True, False, True, False, True, True],
[True]*9,
[True, True, False, True, False, True, False, True, True],
[True]*9,
[True]*9]
def resizeXY(X, Y, occurrency, dx, dz):
"""This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values
contained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed"""
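	# Note: X is indexed with y_index below, which assumes len(X) == len(Y)
	# (true for the 9x9 window grids this function is called on).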
sumY = sum(Y)
sumX = sum(X)
visitedY = [False]*len(Y)
for y_index in range(len(Y)):
update = True
for x_index in range(len(X)):
if(occurrency[x_index][y_index] == False):
update = False
if(update):
sumY = sumY - Y[y_index]
sumX = sumX - X[y_index]
dx = dx - X[y_index]
dz = dz - Y[y_index]
for x_index in range(len(X)):
modifyX = False
for y_index in range(len(Y)):
if(occurrency[x_index][y_index] == False and visitedY[y_index] == False):
Y[y_index] = (dz * Y[y_index])/sumY
visitedY[y_index] = True
modifyX = True
if(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX):
modifyX = True
if(modifyX):
X[x_index] = (dx * X[x_index])/sumX
def window(windowX, windowY, occurrency):
"""This function, given three array, X, Y and occurrency, return the HPC model of the window
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for 'scaling'"""
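	# In pyplasm, a negative value passed to QUOTE() produces an empty interval,
	# so filled and empty cells alternate according to the occurrency matrix.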
def window0(dx, dy, dz):
resizeXY(windowX,windowY,occurrency, dx, dz)
model = []
for xIndex in range(len(windowX)):
yQuotes = []
xSum = sum(windowX[:xIndex])
for yIndex in range(len(windowY)):
if(occurrency[xIndex][yIndex] == False):
yQuotes.append(-windowY[yIndex])
else:
yQuotes.append(windowY[yIndex])
model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))
result = STRUCT(model)
result = MAP([S2,S3,S1])(PROD([result, Q(dy)]))
windowFrame = STRUCT([result])
windowFrame = TEXTURE(["iron.jpg"])(windowFrame)
glass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])
glass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)
glass = TEXTURE(["glass2.jpg"])(glass)
window = STRUCT([windowFrame, glass])
window = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)
return window
return window0
def door(doorX, doorY, occurrency):
"""This function takes in input three array, X, Y and occurrency and returns the HPC model of the door
generated according to the three parameters. X and Y contain values of distances calculated on the previous
segment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled.
The inner function is useful for scaling the resulting door by the three parameter dx, dy, dz."""
def door0(dx, dy, dz):
model = []
for xIndex in range(len(doorX)):
yQuotes = []
xSum = sum(doorX[:xIndex])
for yIndex in range(len(doorY)):
if(occurrency[xIndex][yIndex] == False):
yQuotes.append(-doorY[yIndex])
else:
yQuotes.append(doorY[yIndex])
model.append(PROD([ QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))
res = PROD([STRUCT(model), Q(dy)])
res = MAP([S2,S3,S1])(res)
res = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (res)
door = TEXTURE(["wood.jpg", True, False, 1, 1, 0, 1, 1])(STRUCT([res]))
glass = CUBOID([SIZE([1])(res)[0]*0.94, 0.01, SIZE([3])(res)[0]*0.94])
glass = T([1,2,3])([dx*0.003, dy/2, dz*0.005])(glass)
glass = TEXTURE(["glass.jpg"])(glass)
refiner = CUBOID([0.03, 0.01,dz])
refiner = T([1,2])([dx/2,dy])(refiner)
refiner = TEXTURE(["wood.jpg", True, False, 1, 1, 0, 1, 1])(refiner)
handler1 = T(3)(.15)(CUBOID([.05,.02,.2]))
handler2 = CUBOID([.05,.02,.05])
handler3 = T([1,2])([.01,.02])(CUBOID([.03,.02,.2]))
handler = TEXTURE("bronze.jpg")(STRUCT([handler3, handler2, handler1]))
handler = T([1,2,3])([dx/2.-2*SIZE([1])(handler)[0],dy, dz/2.-1.5*SIZE([3])(handler)[0]])(handler)
finalDoor = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (STRUCT([door, glass, refiner, handler]))
return finalDoor
return door0
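
# Render a sample door (2.2 x 0.4 x 2.8) and window (0.6 x 0.1 x 1.2);
# VIEW() opens each generated HPC model in the pyplasm viewer.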
VIEW(door(doorX, doorY, doorOccurrency)(2.2, .4, 2.8))
VIEW(window(windowX,windowY,windowOccurrency)(.6,.1,1.2))
|
normal
|
{
"blob_id": "9bc955def6250908050a1f3046dd78480f25e0a1",
"index": 1898,
"step-1": "<mask token>\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n",
"step-3": "<mask token>\ndoorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08, \n 0.18, 0.2]\ndoorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]\ndoorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,\n False, True, False, True, False, True], [True] * 13, [True, False, True,\n False, True, False, True, False, True, False, True, False, True], [True,\n False, True, False, True, True, True, True, True, False, True, False, \n True], [True, False, True, False, False, False, True, False, False, \n False, True, False, True], [True, False, True, True, True, True, True, \n True, True, True, True, False, True], [True, False, False, False, False,\n False, True, False, False, False, False, False, True], [True] * 13]\nwindowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]\nwindowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]\nwindowOccurrency = [[True] * 9, [True, False, False, False, False, False, \n False, False, True], [True] * 9, [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True] * 9]\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n",
"step-4": "from pyplasm import *\ndoorY = [0.2, 0.18, 0.08, 0.18, 0.08, 0.18, 0.4, 0.18, 0.08, 0.18, 0.08, \n 0.18, 0.2]\ndoorX = [0.2, 0.5, 0.2, 1.8, 0.08, 0.18, 0.08, 0.18, 0.2]\ndoorOccurrency = [[True] * 13, [True, False, True, False, True, False, True,\n False, True, False, True, False, True], [True] * 13, [True, False, True,\n False, True, False, True, False, True, False, True, False, True], [True,\n False, True, False, True, True, True, True, True, False, True, False, \n True], [True, False, True, False, False, False, True, False, False, \n False, True, False, True], [True, False, True, True, True, True, True, \n True, True, True, True, False, True], [True, False, False, False, False,\n False, True, False, False, False, False, False, True], [True] * 13]\nwindowY = [0.04, 0.04, 0.2, 0.02, 0.16, 0.02, 0.2, 0.04, 0.04]\nwindowX = [0.02, 0.8, 0.05, 0.02, 0.4, 0.02, 0.4, 0.05, 0.04]\nwindowOccurrency = [[True] * 9, [True, False, False, False, False, False, \n False, False, True], [True] * 9, [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True, True, False, True, \n False, True, False, True, True], [True] * 9, [True] * 9]\n\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n \"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n sumY = sum(Y)\n sumX = sum(X)\n visitedY = [False] * len(Y)\n for y_index in range(len(Y)):\n update = True\n for x_index in range(len(X)):\n if occurrency[x_index][y_index] == False:\n update = False\n if update:\n sumY = sumY - Y[y_index]\n sumX = sumX - X[y_index]\n dx = dx - X[y_index]\n dz = dz - Y[y_index]\n for x_index in range(len(X)):\n modifyX = False\n for y_index in range(len(Y)):\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == False:\n Y[y_index] = dz * Y[y_index] / sumY\n visitedY[y_index] = True\n modifyX = True\n if occurrency[x_index][y_index] == False and visitedY[y_index\n ] == True and not modifyX:\n modifyX = True\n if modifyX:\n X[x_index] = dx * X[x_index] / sumX\n\n\ndef window(windowX, windowY, occurrency):\n \"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for 'scaling'\"\"\"\n\n def window0(dx, dy, dz):\n resizeXY(windowX, windowY, occurrency, dx, dz)\n model = []\n for xIndex in range(len(windowX)):\n yQuotes = []\n xSum = sum(windowX[:xIndex])\n for yIndex in range(len(windowY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-windowY[yIndex])\n else:\n yQuotes.append(windowY[yIndex])\n model.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(\n yQuotes)]))\n result = STRUCT(model)\n result = MAP([S2, S3, S1])(PROD([result, Q(dy)]))\n windowFrame = STRUCT([result])\n windowFrame = TEXTURE(['iron.jpg'])(windowFrame)\n glass = CUBOID([SIZE([1])(result)[0] * 0.98, 0.001, SIZE([3])(\n result)[0] * 0.95])\n glass = T([1, 2, 3])([dx * 0.005, dy / 2, 0.01])(glass)\n glass = TEXTURE(['glass2.jpg'])(glass)\n window = STRUCT([windowFrame, glass])\n window = S([1, 2, 3])([dx / SIZE([1])(window)[0], dy / SIZE([2])(\n window)[0], dz / SIZE([3])(window)[0]])(window)\n return window\n return window0\n\n\ndef door(doorX, doorY, occurrency):\n \"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\n def door0(dx, dy, dz):\n model = []\n for xIndex in range(len(doorX)):\n yQuotes = []\n xSum = sum(doorX[:xIndex])\n for yIndex in range(len(doorY)):\n if occurrency[xIndex][yIndex] == False:\n yQuotes.append(-doorY[yIndex])\n else:\n yQuotes.append(doorY[yIndex])\n model.append(PROD([QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n res = PROD([STRUCT(model), Q(dy)])\n res = MAP([S2, S3, S1])(res)\n res = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(res)[0],\n dz / SIZE([3])(res)[0]])(res)\n door = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n glass = CUBOID([SIZE([1])(res)[0] * 0.94, 0.01, SIZE([3])(res)[0] *\n 0.94])\n glass = T([1, 2, 3])([dx * 0.003, dy / 2, dz * 0.005])(glass)\n glass = TEXTURE(['glass.jpg'])(glass)\n refiner = CUBOID([0.03, 0.01, dz])\n refiner = T([1, 2])([dx / 2, dy])(refiner)\n refiner = TEXTURE(['wood.jpg', True, False, 1, 1, 0, 1, 1])(refiner)\n handler1 = T(3)(0.15)(CUBOID([0.05, 0.02, 0.2]))\n handler2 = CUBOID([0.05, 0.02, 0.05])\n handler3 = T([1, 2])([0.01, 0.02])(CUBOID([0.03, 0.02, 0.2]))\n handler = TEXTURE('bronze.jpg')(STRUCT([handler3, handler2, handler1]))\n handler = T([1, 2, 3])([dx / 2.0 - 2 * SIZE([1])(handler)[0], dy, \n dz / 2.0 - 1.5 * SIZE([3])(handler)[0]])(handler)\n finalDoor = S([1, 2, 3])([dx / SIZE([1])(res)[0], dy / SIZE([2])(\n res)[0], dz / SIZE([3])(res)[0]])(STRUCT([door, glass, refiner,\n handler]))\n return finalDoor\n return door0\n\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, 0.4, 2.8))\nVIEW(window(windowX, windowY, windowOccurrency)(0.6, 0.1, 1.2))\n",
"step-5": "from pyplasm import *\n\ndoorY = [.2,.18,.08,.18,.08,.18,.4,.18,.08,.18,.08,.18,.2]\ndoorX = [.2,.5,.2,1.8,.08,.18,.08,.18,.2]\n\ndoorOccurrency = [[True]*13,\n\t\t\t\t\t[True, False, True, False, True, False, True, False, True, False, True, False, True],\n\t\t\t\t\t[True]*13,\n\t\t\t\t\t[True, False, True, False, True, False, True, False, True, False, True, False, True],\n\t\t\t\t\t[True, False, True, False, True, True, True, True, True, False, True, False, True],\n\t\t\t\t\t[True, False, True, False, False, False, True, False, False, False, True, False, True],\n\t\t\t\t\t[True, False, True, True, True, True, True, True, True, True, True, False, True],\n\t\t\t\t\t[True, False, False, False, False, False, True, False, False, False, False, False, True],\n\t\t\t\t\t[True]*13]\n\nwindowY = [0.04,0.04,0.2,0.02,0.16,0.02,0.2,0.04,0.04]\nwindowX = [0.02,0.8,0.05,0.02,0.4,0.02,0.4,0.05,0.04]\n\nwindowOccurrency = [[True]*9,\n\t\t\t\t\t[True, False, False, False, False, False, False, False, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True, True, False, True, False, True, False, True, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True, True, False, True, False, True, False, True, True],\n\t\t\t\t\t[True]*9,\n\t\t\t\t\t[True]*9]\n\ndef resizeXY(X, Y, occurrency, dx, dz):\n\t\"\"\"This function takes in input X,Y,occurrency, two dimensions dx, dz and scales the values\n\tcontained in X and Y, in such a way that only empty spaces are scaled and filled spaces are mantained fixed\"\"\"\n\tsumY = sum(Y) \n\tsumX = sum(X)\n\tvisitedY = [False]*len(Y)\n\tfor y_index in range(len(Y)):\n\t\tupdate = True\n\t\tfor x_index in range(len(X)):\n\t\t\tif(occurrency[x_index][y_index] == False):\n\t\t\t\tupdate = False \n\t\tif(update):\n\t\t\tsumY = sumY - Y[y_index]\n\t\t\tsumX = sumX - X[y_index]\n\t\t\tdx = dx - X[y_index]\n\t\t\tdz = dz - Y[y_index]\n\n\tfor x_index in range(len(X)):\n\t\tmodifyX = False\n\t\tfor y_index in range(len(Y)):\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == False):\n\t\t\t\tY[y_index] = (dz * Y[y_index])/sumY\n\t\t\t\tvisitedY[y_index] = True\n\t\t\t\tmodifyX = True\n\t\t\tif(occurrency[x_index][y_index] == False and visitedY[y_index] == True and not modifyX):\n\t\t\t\tmodifyX = True\n\t\tif(modifyX):\n\t\t\tX[x_index] = (dx * X[x_index])/sumX\n\n\ndef window(windowX, windowY, occurrency):\n\t\"\"\"This function, given three array, X, Y and occurrency, return the HPC model of the window\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. 
\n\tThe inner function is useful for 'scaling'\"\"\"\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0\n\n\ndef door(doorX, doorY, occurrency):\n\t\"\"\"This function takes in input three array, X, Y and occurrency and returns the HPC model of the door\n\tgenerated according to the three parameters. X and Y contain values of distances calculated on the previous \n\tsegment of the axis. Occurrency is a matrix containing booleans that map which cell is empty and which cell is filled. \n\tThe inner function is useful for scaling the resulting door by the three parameter dx, dy, dz.\"\"\"\n\tdef door0(dx, dy, dz):\n\n\t\tmodel = []\n\n\t\tfor xIndex in range(len(doorX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(doorX[:xIndex])\n\t\t\tfor yIndex in range(len(doorY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-doorY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(doorY[yIndex])\n\t\t\tmodel.append(PROD([ QUOTE([-xSum, doorX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tres = PROD([STRUCT(model), Q(dy)])\n\t\tres = MAP([S2,S3,S1])(res)\n\t\tres = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (res)\n\n\t\tdoor = TEXTURE([\"wood.jpg\", True, False, 1, 1, 0, 1, 1])(STRUCT([res]))\n\n\t\tglass = CUBOID([SIZE([1])(res)[0]*0.94, 0.01, SIZE([3])(res)[0]*0.94])\n\t\tglass = T([1,2,3])([dx*0.003, dy/2, dz*0.005])(glass)\n\t\tglass = TEXTURE([\"glass.jpg\"])(glass)\n\n\t\trefiner = CUBOID([0.03, 0.01,dz])\n\t\trefiner = T([1,2])([dx/2,dy])(refiner)\n\t\trefiner = TEXTURE([\"wood.jpg\", True, False, 1, 1, 0, 1, 1])(refiner)\n\n\t\thandler1 = T(3)(.15)(CUBOID([.05,.02,.2]))\n\t\thandler2 = CUBOID([.05,.02,.05])\n\t\thandler3 = T([1,2])([.01,.02])(CUBOID([.03,.02,.2]))\n\t\thandler = TEXTURE(\"bronze.jpg\")(STRUCT([handler3, handler2, handler1]))\n\t\thandler = T([1,2,3])([dx/2.-2*SIZE([1])(handler)[0],dy, dz/2.-1.5*SIZE([3])(handler)[0]])(handler)\n\n\t\tfinalDoor = S([1,2,3])([dx/SIZE([1])(res)[0], dy/SIZE([2])(res)[0], dz/SIZE([3])(res)[0]]) (STRUCT([door, glass, refiner, handler]))\n\n\t\treturn finalDoor\n\n\treturn door0\n\nVIEW(door(doorX, doorY, doorOccurrency)(2.2, .4, 2.8))\nVIEW(window(windowX,windowY,windowOccurrency)(.6,.1,1.2))",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for demo in demoModules:
pid = os.fork()
filepath = './' + demo + '.py'
if pid == 0:
        os.execvp('python3.5', ('python3.5', filepath))
<|reserved_special_token_0|>
root.title('Progress')
Label(root, text='Multiple program demo: command lines', bg='white').pack()
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
demoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']
for demo in demoModules:
pid = os.fork()
filepath = './' + demo + '.py'
if pid == 0:
        os.execvp('python3.5', ('python3.5', filepath))
root = Tk()
root.title('Progress')
Label(root, text='Multiple program demo: command lines', bg='white').pack()
root.mainloop()
<|reserved_special_token_1|>
from tkinter import *
from PP4E.launchmodes import PortableLauncher
import os, sys
demoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']
for demo in demoModules:
pid = os.fork()
filepath = './' + demo + '.py'
if pid == 0:
        os.execvp('python3.5', ('python3.5', filepath))
root = Tk()
root.title('Progress')
Label(root, text='Multiple program demo: command lines', bg='white').pack()
root.mainloop()
<|reserved_special_token_1|>
# e.g. 8-34
from tkinter import *
from PP4E.launchmodes import PortableLauncher
import os, sys
demoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']
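# Fork one child per demo; each child replaces itself (via execvp) with a
# Python 3.5 interpreter running the demo script. PortableLauncher and sys
# are imported but unused in this variant.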
for demo in demoModules:
pid = os.fork()
filepath = './' + demo + '.py'
if pid == 0:
        os.execvp('python3.5', ('python3.5', filepath))
root = Tk()
root.title('Progress')
Label(root, text='Multiple program demo: command lines', bg='white').pack()
root.mainloop()
|
flexible
|
{
"blob_id": "d91dc850c293cf085e1be04b6e13e0a62cb0bcb1",
"index": 9812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\n<mask token>\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n",
"step-3": "<mask token>\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n",
"step-4": "from tkinter import *\nfrom PP4E.launchmodes import PortableLauncher\nimport os, sys\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n",
"step-5": "# e.g. 8-34\n\nfrom tkinter import *\nfrom PP4E.launchmodes import PortableLauncher\nimport os, sys\n\n\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\n\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath, ))\n\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
schema_view = get_swagger_view(title='Pastebin API')
urlpatterns = [url('^admin/', admin.site.urls), url('^doc_u/', schema_view),
url('^', include('o.urls')), url('^api/', include('restapi.urls',
namespace='res'))]
<|reserved_special_token_1|>
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='Pastebin API')
urlpatterns = [url('^admin/', admin.site.urls), url('^doc_u/', schema_view),
url('^', include('o.urls')), url('^api/', include('restapi.urls',
namespace='res'))]
<|reserved_special_token_1|>
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='Pastebin API')
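# Routes: the Django admin, the Swagger UI at /doc_u/, the 'o' app at the
# site root, and the REST API under /api/ (namespaced as 'res').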
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^doc_u/', schema_view),
    url(r'^', include('o.urls')),
url(r'^api/', include('restapi.urls', namespace='res')),
]
|
flexible
|
{
"blob_id": "891588327046e26acb9a691fa8bb9a99420712d6",
"index": 913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nschema_view = get_swagger_view(title='Pastebin API')\nurlpatterns = [url('^admin/', admin.site.urls), url('^doc_u/', schema_view),\n url('^', include('o.urls')), url('^api/', include('restapi.urls',\n namespace='res'))]\n",
"step-3": "from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework_swagger.views import get_swagger_view\nschema_view = get_swagger_view(title='Pastebin API')\nurlpatterns = [url('^admin/', admin.site.urls), url('^doc_u/', schema_view),\n url('^', include('o.urls')), url('^api/', include('restapi.urls',\n namespace='res'))]\n",
"step-4": "from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom rest_framework_swagger.views import get_swagger_view\nschema_view = get_swagger_view(title='Pastebin API')\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^doc_u/', schema_view),\n url(r'^', include('o.urls', )),\n url(r'^api/', include('restapi.urls', namespace='res')),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Book(models.Model):
ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
    AuthorID = models.IntegerField()
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Author(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Book(models.Model):
ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
    AuthorID = models.IntegerField()
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Author(models.Model):
AuthorID = models.IntegerField(primary_key=True)
Name = models.CharField(max_length=200)
    Age = models.IntegerField()
Country = models.CharField(max_length=100)
class Book(models.Model):
ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
    AuthorID = models.IntegerField()
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
<|reserved_special_token_1|>
from django.db import models
class Author(models.Model):
AuthorID = models.IntegerField(primary_key=True)
Name = models.CharField(max_length=200)
    Age = models.IntegerField()
Country = models.CharField(max_length=100)
class Book(models.Model):
ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
    AuthorID = models.IntegerField()
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
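# Note: Book.AuthorID is a plain integer mirroring Author.AuthorID; a
# ForeignKey would be the usual choice, but the original schema is kept.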
class Author(models.Model):
AuthorID = models.IntegerField(primary_key=True)
Name = models.CharField(max_length=200)
    Age = models.IntegerField()
Country = models.CharField(max_length=100)
class Book(models.Model):
    ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
    AuthorID = models.IntegerField()
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
|
flexible
|
{
"blob_id": "817d7259b3607f3a94d2f363c9684f733ee87d37",
"index": 2124,
"step-1": "<mask token>\n\n\nclass Book(models.Model):\n ISBN = models.CharField(primary_key=True, max_length=100)\n Title = models.CharField(max_length=200)\n AuthorID = models.IntegerField(max_length=100)\n Publisher = models.CharField(max_length=200)\n PublishDate = models.CharField(max_length=200)\n Price = models.FloatField(max_length=200)\n",
"step-2": "<mask token>\n\n\nclass Author(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Book(models.Model):\n ISBN = models.CharField(primary_key=True, max_length=100)\n Title = models.CharField(max_length=200)\n AuthorID = models.IntegerField(max_length=100)\n Publisher = models.CharField(max_length=200)\n PublishDate = models.CharField(max_length=200)\n Price = models.FloatField(max_length=200)\n",
"step-3": "<mask token>\n\n\nclass Author(models.Model):\n AuthorID = models.IntegerField(primary_key=True)\n Name = models.CharField(max_length=200)\n Age = models.IntegerField(max_length=50)\n Country = models.CharField(max_length=100)\n\n\nclass Book(models.Model):\n ISBN = models.CharField(primary_key=True, max_length=100)\n Title = models.CharField(max_length=200)\n AuthorID = models.IntegerField(max_length=100)\n Publisher = models.CharField(max_length=200)\n PublishDate = models.CharField(max_length=200)\n Price = models.FloatField(max_length=200)\n",
"step-4": "from django.db import models\n\n\nclass Author(models.Model):\n AuthorID = models.IntegerField(primary_key=True)\n Name = models.CharField(max_length=200)\n Age = models.IntegerField(max_length=50)\n Country = models.CharField(max_length=100)\n\n\nclass Book(models.Model):\n ISBN = models.CharField(primary_key=True, max_length=100)\n Title = models.CharField(max_length=200)\n AuthorID = models.IntegerField(max_length=100)\n Publisher = models.CharField(max_length=200)\n PublishDate = models.CharField(max_length=200)\n Price = models.FloatField(max_length=200)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Author(models.Model):\n AuthorID = models.IntegerField(primary_key=True)\n Name = models.CharField(max_length=200)\n Age = models.IntegerField(max_length=50)\n Country = models.CharField(max_length=100)\n\nclass Book(models.Model):\n ISBN = models.CharField(primary_key=True,max_length=100)\n Title = models.CharField(max_length=200)\n AuthorID = models.IntegerField(max_length=100)\n Publisher = models.CharField(max_length=200)\n PublishDate = models.CharField(max_length=200)\n Price = models.FloatField(max_length=200)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
Download the full CHIRPS 2.0 data for a specific type (dekads, pentads, daily ...)
with the possibility to automatically recut the data over Argentina.
"""
import os
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import subprocess
##############
# PARAMETERS to define
# Set a pre-existing directory where the CHIRPS files must be saved
download_dir = ""
# URL for the global dekad product; change it to download another product
url = 'https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_dekad/netcdf/'
# Recut the data over Argentina
argentina = False
startindex = 5
##############
if download_dir != "":
os.chdir(download_dir)
response = requests.get(url)
soup = BeautifulSoup(response.text,"html.parser")
soup.findAll('a')
    # First link to download on the page.
    # The index 5 is valid for the dekad listing but may change for another
    # product (e.g. daily, monthly); to be sure, open the URL and check that
    # the first kept link corresponds to the first year.
one_a_tag = soup.findAll('a')[startindex:]
links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]
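    # The listing is assumed to contain only NetCDF files after `startindex`;
    # if not, a filter like [l for l in links if l.endswith('.nc')] would be safer.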
for link in links:
print(link)
download_url = url + link
urllib.request.urlretrieve(download_url,"./"+link)
        # Optional: crop CHIRPS to Argentina (lon -80..-44, lat -60..-20) with CDO
if argentina:
subprocess.check_call(["cdo", "sellonlatbox,-80,-44,-60,-20", link, link.replace(".nc", "ARG.nc")])
subprocess.check_call(["rm", link])
time.sleep(1)
else:
print("Please enter a valid download direction")
|
normal
|
{
"blob_id": "ff0495ee1f4aa1f243c82b709a974d3d7c37e8bd",
"index": 2425,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif download_dir != '':\n os.chdir(download_dir)\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n soup.findAll('a')\n one_a_tag = soup.findAll('a')[startindex:]\n links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]\n for link in links:\n print(link)\n download_url = url + link\n urllib.request.urlretrieve(download_url, './' + link)\n if argentina:\n subprocess.check_call(['cdo', 'sellonlatbox,-80,-44,-60,-20',\n link, link.replace('.nc', 'ARG.nc')])\n subprocess.check_call(['rm', link])\n time.sleep(1)\nelse:\n print('Please enter a valid download direction')\n",
"step-3": "<mask token>\ndownload_dir = ''\nurl = 'https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_dekad/netcdf/'\nargentina = False\nstartindex = 5\nif download_dir != '':\n os.chdir(download_dir)\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n soup.findAll('a')\n one_a_tag = soup.findAll('a')[startindex:]\n links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]\n for link in links:\n print(link)\n download_url = url + link\n urllib.request.urlretrieve(download_url, './' + link)\n if argentina:\n subprocess.check_call(['cdo', 'sellonlatbox,-80,-44,-60,-20',\n link, link.replace('.nc', 'ARG.nc')])\n subprocess.check_call(['rm', link])\n time.sleep(1)\nelse:\n print('Please enter a valid download direction')\n",
"step-4": "<mask token>\nimport os\nimport requests\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nimport subprocess\ndownload_dir = ''\nurl = 'https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_dekad/netcdf/'\nargentina = False\nstartindex = 5\nif download_dir != '':\n os.chdir(download_dir)\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n soup.findAll('a')\n one_a_tag = soup.findAll('a')[startindex:]\n links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]\n for link in links:\n print(link)\n download_url = url + link\n urllib.request.urlretrieve(download_url, './' + link)\n if argentina:\n subprocess.check_call(['cdo', 'sellonlatbox,-80,-44,-60,-20',\n link, link.replace('.nc', 'ARG.nc')])\n subprocess.check_call(['rm', link])\n time.sleep(1)\nelse:\n print('Please enter a valid download direction')\n",
"step-5": "\"\"\"\nDownload the full CHIRPS 2.0 data for a specific type (dekads, pentads, daily ...)\nwith the possibility to automatically recut the data over Argentina.\n\"\"\"\nimport os\nimport requests\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nimport subprocess\n\n##############\n\n# PARAMETERS to define\n\n# Set a pre-existing directory where the CHIRPS files must be saved\ndownload_dir = \"\"\n# Url for global dekad, change if you want another product\nurl = 'https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_dekad/netcdf/'\n# Recut the data over Argentina\nargentina = False\nstartindex = 5\n\n##############\nif download_dir != \"\":\n os.chdir(download_dir)\n\n response = requests.get(url)\n soup = BeautifulSoup(response.text,\"html.parser\")\n soup.findAll('a')\n \n # First link to download in the page\n # Here the index = 5 is valid for the dekad link but it may change if you download another product (ex : daily, dekad, monthly)\n # To be sure you can check the link and check that it is the first year\n one_a_tag = soup.findAll('a')[startindex:] \n links = [one_a_tag[i]['href'] for i in range(len(one_a_tag))]\n\n for link in links:\n print(link)\n download_url = url + link\n urllib.request.urlretrieve(download_url,\"./\"+link)\n # Section to recut CHIRPS over Argentina\n if argentina:\n subprocess.check_call([\"cdo\", \"sellonlatbox,-80,-44,-60,-20\", link, link.replace(\".nc\", \"ARG.nc\")])\n subprocess.check_call([\"rm\", link])\n time.sleep(1)\n\nelse:\n print(\"Please enter a valid download direction\")\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>


class AppendSlashMiddleware(MiddlewareMixin):
    <|reserved_special_token_0|>

    def process_request(self, request):
        redirect_url = ''
        if self.should_redirect_with_slash(request):
            path = self.get_full_path_with_slash(request)
        else:
            path = request.get_full_path()
        if redirect_url or path != request.get_full_path():
            redirect_url += path
            return self.response_redirect_class(redirect_url)
    <|reserved_special_token_0|>

    def get_full_path_with_slash(self, request):
        new_path = request.get_full_path(force_append_slash=True)
        return escape_leading_slashes(new_path)

    def process_response(self, request, response):
        if response.status_code == 404:
            if self.should_redirect_with_slash(request):
                return self.response_redirect_class(
                    self.get_full_path_with_slash(request))
        if not response.streaming and not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))
        return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class AppendSlashMiddleware(MiddlewareMixin):
    <|reserved_special_token_0|>

    def process_request(self, request):
        redirect_url = ''
        if self.should_redirect_with_slash(request):
            path = self.get_full_path_with_slash(request)
        else:
            path = request.get_full_path()
        if redirect_url or path != request.get_full_path():
            redirect_url += path
            return self.response_redirect_class(redirect_url)

    def should_redirect_with_slash(self, request):
        if request.path_info.endswith('/'):
            return False
        urlconf = getattr(request, 'urlconf', None)
        return (
            not is_valid_path(request.path_info, urlconf) and
            is_valid_path('%s/' % request.path_info, urlconf)
        )

    def get_full_path_with_slash(self, request):
        new_path = request.get_full_path(force_append_slash=True)
        return escape_leading_slashes(new_path)

    def process_response(self, request, response):
        if response.status_code == 404:
            if self.should_redirect_with_slash(request):
                return self.response_redirect_class(
                    self.get_full_path_with_slash(request))
        if not response.streaming and not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))
        return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class AppendSlashMiddleware(MiddlewareMixin):
    response_redirect_class = HttpResponsePermanentRedirect

    def process_request(self, request):
        redirect_url = ''
        if self.should_redirect_with_slash(request):
            path = self.get_full_path_with_slash(request)
        else:
            path = request.get_full_path()
        if redirect_url or path != request.get_full_path():
            redirect_url += path
            return self.response_redirect_class(redirect_url)

    def should_redirect_with_slash(self, request):
        if request.path_info.endswith('/'):
            return False
        urlconf = getattr(request, 'urlconf', None)
        return (
            not is_valid_path(request.path_info, urlconf) and
            is_valid_path('%s/' % request.path_info, urlconf)
        )

    def get_full_path_with_slash(self, request):
        new_path = request.get_full_path(force_append_slash=True)
        return escape_leading_slashes(new_path)

    def process_response(self, request, response):
        if response.status_code == 404:
            if self.should_redirect_with_slash(request):
                return self.response_redirect_class(
                    self.get_full_path_with_slash(request))
        if not response.streaming and not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))
        return response
<|reserved_special_token_1|>
from django.http import HttpResponsePermanentRedirect
from django.urls import is_valid_path
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import escape_leading_slashes


class AppendSlashMiddleware(MiddlewareMixin):
    response_redirect_class = HttpResponsePermanentRedirect

    def process_request(self, request):
        redirect_url = ''
        if self.should_redirect_with_slash(request):
            path = self.get_full_path_with_slash(request)
        else:
            path = request.get_full_path()
        if redirect_url or path != request.get_full_path():
            redirect_url += path
            return self.response_redirect_class(redirect_url)

    def should_redirect_with_slash(self, request):
        if request.path_info.endswith('/'):
            return False
        urlconf = getattr(request, 'urlconf', None)
        return (
            not is_valid_path(request.path_info, urlconf) and
            is_valid_path('%s/' % request.path_info, urlconf)
        )

    def get_full_path_with_slash(self, request):
        new_path = request.get_full_path(force_append_slash=True)
        return escape_leading_slashes(new_path)

    def process_response(self, request, response):
        if response.status_code == 404:
            if self.should_redirect_with_slash(request):
                return self.response_redirect_class(
                    self.get_full_path_with_slash(request))
        if not response.streaming and not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))
        return response
<|reserved_special_token_1|>
from django.http import HttpResponsePermanentRedirect
from django.urls import is_valid_path
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import escape_leading_slashes


class AppendSlashMiddleware(MiddlewareMixin):
    response_redirect_class = HttpResponsePermanentRedirect

    def process_request(self, request):
        redirect_url = ''

        if self.should_redirect_with_slash(request):
            path = self.get_full_path_with_slash(request)
        else:
            path = request.get_full_path()

        if redirect_url or path != request.get_full_path():
            redirect_url += path
            return self.response_redirect_class(redirect_url)

    def should_redirect_with_slash(self, request):
        if request.path_info.endswith('/'):
            return False

        urlconf = getattr(request, 'urlconf', None)
        return (
            not is_valid_path(request.path_info, urlconf) and
            is_valid_path('%s/' % request.path_info, urlconf)
        )

    def get_full_path_with_slash(self, request):
        new_path = request.get_full_path(force_append_slash=True)
        return escape_leading_slashes(new_path)

    def process_response(self, request, response):
        if response.status_code == 404:
            if self.should_redirect_with_slash(request):
                return self.response_redirect_class(
                    self.get_full_path_with_slash(request))

        if not response.streaming and \
                not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))

        return response
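# Usage sketch (assuming a standard Django settings module): append the dotted
# path of this class to MIDDLEWARE; a request for "/foo" then gets a permanent
# (301) redirect to "/foo/" whenever only the slash-terminated URL resolves.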
|
flexible
|
{
"blob_id": "ec70fb9119b430dcd36549f2fac8e5e0a0e1bb00",
"index": 2696,
"step-1": "<mask token>\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n <mask token>\n\n def process_request(self, request):\n redirect_url = ''\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n <mask token>\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.\n get_full_path_with_slash(request))\n if not response.streaming and not response.has_header('Content-Length'\n ):\n response['Content-Length'] = str(len(response.content))\n return response\n",
"step-2": "<mask token>\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n <mask token>\n\n def process_request(self, request):\n redirect_url = ''\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n\n def should_redirect_with_slash(self, request):\n if request.path_info.endswith('/'):\n return False\n urlconf = getattr(request, 'urlconf', None)\n return not is_valid_path(request.path_info, urlconf) and is_valid_path(\n '%s/' % request.path_info, urlconf)\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.\n get_full_path_with_slash(request))\n if not response.streaming and not response.has_header('Content-Length'\n ):\n response['Content-Length'] = str(len(response.content))\n return response\n",
"step-3": "<mask token>\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n response_redirect_class = HttpResponsePermanentRedirect\n\n def process_request(self, request):\n redirect_url = ''\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n\n def should_redirect_with_slash(self, request):\n if request.path_info.endswith('/'):\n return False\n urlconf = getattr(request, 'urlconf', None)\n return not is_valid_path(request.path_info, urlconf) and is_valid_path(\n '%s/' % request.path_info, urlconf)\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.\n get_full_path_with_slash(request))\n if not response.streaming and not response.has_header('Content-Length'\n ):\n response['Content-Length'] = str(len(response.content))\n return response\n",
"step-4": "from django.http import HttpResponsePermanentRedirect\nfrom django.urls import is_valid_path\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.utils.http import escape_leading_slashes\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n response_redirect_class = HttpResponsePermanentRedirect\n\n def process_request(self, request):\n redirect_url = ''\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n\n def should_redirect_with_slash(self, request):\n if request.path_info.endswith('/'):\n return False\n urlconf = getattr(request, 'urlconf', None)\n return not is_valid_path(request.path_info, urlconf) and is_valid_path(\n '%s/' % request.path_info, urlconf)\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.\n get_full_path_with_slash(request))\n if not response.streaming and not response.has_header('Content-Length'\n ):\n response['Content-Length'] = str(len(response.content))\n return response\n",
"step-5": "\nfrom django.http import HttpResponsePermanentRedirect\nfrom django.urls import is_valid_path\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.utils.http import escape_leading_slashes\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n response_redirect_class = HttpResponsePermanentRedirect\n\n def process_request(self, request):\n redirect_url = ''\n\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n\n def should_redirect_with_slash(self, request):\n if request.path_info.endswith('/'):\n return False\n\n urlconf = getattr(request, 'urlconf', None)\n return (\n not is_valid_path(request.path_info, urlconf) and\n is_valid_path('%s/' % request.path_info, urlconf)\n )\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(\n self.get_full_path_with_slash(request))\n\n if not response.streaming and \\\n not response.has_header('Content-Length'):\n response['Content-Length'] = str(len(response.content))\n\n return response\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |