code (string, lengths 13–1.2M) | order_type (string, 1 value) | original_example (dict) | step_ids (list, lengths 1–5)
---|---|---|---|
#2) write a program to make banking system develop business logic
#in one module and call functionality in another .py file
class Customer: #user defined class
    def __init__(self,name,phoneno,address,pin,accno,balance) : #constructor with multiple arguments
        self._name=name
        self._pno=phoneno
        self._add=address
        self._pin=pin
        self._acc=accno
        self._bal=balance #protected variable
    def add(self) : #user defined method
        self._d={} #create empty dictionary
        self._d['CustomerName']=self._name #add values to the dictionary using key names
        self._d['CustomerPhonenumber']=self._pno
        self._d['CustomerAddress']=self._add
        self._d['CustomerPin']=self._pin
        self._d['CustomerAccountNumber']=self._acc
        self._d['CustomerBalance']=self._bal
        print('Customer Details Add Successfully')
    def deposit(self):
        amt=int(input('Enter Deposit amount : '))
        self._d['CustomerBalance']+=amt
        print('Your a/c is credited for INR ',amt)
        print('Account Balance is ',self._d['CustomerBalance'])
        print()
    def withdraw(self):
        amt=int(input('Enter Withdraw amount : '))
        if amt>self._d['CustomerBalance'] :
            print('Insufficient Balance')
            print('Account Balance is ',self._d['CustomerBalance'])
            print()
        else:
            self._d['CustomerBalance']-=amt
            print('Your a/c is debited for INR ',amt)
            print('Account Balance is ',self._d['CustomerBalance'])
            print()
    def transfer(self):
        name=input('Enter Recipient name : ')
        acc=input('Enter account number : ')
        if len(acc)==16:
            amt=int(input('Enter amount to transfer : '))
            if amt>self._d['CustomerBalance'] :
                print('Insufficient Balance')
                print('Account Balance is ',self._d['CustomerBalance'])
                print()
            else:
                self._d['CustomerBalance']-=amt
                print('Transfer amount successfully')
                print('Your a/c is debited for INR ',amt)
                print('Account Balance is ',self._d['CustomerBalance'])
                print()
        else:
            print('Invalid Account Number\n')
    def mini(self):
        print('Name : ',self._d['CustomerName'])
        print('Account Balance is ',self._d['CustomerBalance'])
        print()
    def __del__(self): #destructor
        print('Thank You')
        pass
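
# Illustrative caller (a sketch, not part of the original listing): the header comment
# says the business logic above lives in one module and is called from another .py file,
# but that second file is not shown. Assuming the class above is saved as a hypothetical
# module named bank.py, a minimal driver module could look like this:
#
#     from bank import Customer
#
#     cust = Customer('Siva', '9876543210', 'Hyderabad', 1234, '1234567890123456', 5000)
#     cust.add()        # build the details dictionary before any transaction
#     cust.deposit()    # prompts for an amount and credits the balance
#     cust.withdraw()   # prompts for an amount and debits it if funds allow
#     cust.transfer()   # asks for a 16-digit account number and an amount
#     cust.mini()       # prints the customer's name and current balance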
|
normal
|
{
"blob_id": "cf5a9b8dad5a02610fa5ce2a849b6f9fc50a0aa8",
"index": 1872,
"step-1": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n <mask token>\n <mask token>\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-2": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n <mask token>\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-3": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n\n def add(self):\n self._d = {}\n self._d['CustomerName'] = self._name\n self._d['CustomerPhonenumber'] = self._pno\n self._d['CustomerAddress'] = self._add\n self._d['CustomerPin'] = self._pin\n self._d['CustomerAccountNumber'] = self._acc\n self._d['CustomerBalance'] = self._bal\n print('Customer Details Add Successfully')\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-4": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n\n def add(self):\n self._d = {}\n self._d['CustomerName'] = self._name\n self._d['CustomerPhonenumber'] = self._pno\n self._d['CustomerAddress'] = self._add\n self._d['CustomerPin'] = self._pin\n self._d['CustomerAccountNumber'] = self._acc\n self._d['CustomerBalance'] = self._bal\n print('Customer Details Add Successfully')\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def withdraw(self):\n amt = int(input('Enter Withdraw amount : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-5": "#2) write a program to make banking system develop business logic\r\n#in one module and call functionality in another .py file\r\n\r\nclass Customer: #user defined class\r\n def __init__(self,name,phoneno,address,pin,accno,balance) : #constructor with multiple arguments\r\n self._name=name \r\n self._pno=phoneno\r\n self._add=address\r\n self._pin=pin\r\n self._acc=accno\r\n self._bal=balance#protected variable\r\n def add(self) : #user defined method\r\n self._d={} #create empty dictionary\r\n self._d['CustomerName']=self._name #add values to the dictionary using key names\r\n self._d['CustomerPhonenumber']=self._pno\r\n self._d['CustomerAddress']=self._add\r\n self._d['CustomerPin']=self._pin\r\n self._d['CustomerAccountNumber']=self._acc\r\n self._d['CustomerBalance']=self._bal\r\n print('Customer Details Add Successfully')\r\n def deposit(self):\r\n amt=int(input('Enter Deposit amount : '))\r\n self._d['CustomerBalance']+=amt\r\n print('Your a/c is credited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def withdraw(self):\r\n amt=int(input('Enter Withdraw amount : '))\r\n if amt>self._d['CustomerBalance'] :\r\n print('Insufficient Balance')\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n self._d['CustomerBalance']-=amt\r\n print('Your a/c is debited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def transfer(self):\r\n name=input('Enter Recipient name : ')\r\n acc=input('Enter account number : ')\r\n if len(acc)==16:\r\n amt=int(input('Enter amount to transfer : '))\r\n if amt>self._d['CustomerBalance'] :\r\n print('Insufficient Balance')\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n self._d['CustomerBalance']-=amt\r\n print('Transfer amount successfully')\r\n print('Your a/c is debited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n print('Invalid Account Number\\n')\r\n def mini(self):\r\n print('Name : ',self._d['CustomerName'])\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def __del__(self): #destructor\r\n print('Thank You')\r\n pass\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
##class Human:
## pass
##hb1-HB("Sudhir")
##hb2=HB("Sreenu")
class Student:
    def __init__(self,name,rollno):
        self.name=name
        self.rollno=rollno
std1=Student("Siva",123)
|
normal
|
{
"blob_id": "97656bca3ce0085fb2f1167d37485fb7ee812730",
"index": 4825,
"step-1": "<mask token>\n",
"step-2": "class Student:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Student:\n\n def __init__(self, name, rollno):\n self.name = name\n self.rollno = rollno\n\n\n<mask token>\n",
"step-4": "class Student:\n\n def __init__(self, name, rollno):\n self.name = name\n self.rollno = rollno\n\n\nstd1 = Student('Siva', 123)\n",
"step-5": "##class Human:\r\n## pass\r\n##hb1-HB(\"Sudhir\")\r\n##hb2=HB(\"Sreenu\")\r\n\r\n\r\nclass Student:\r\n def __init__(self,name,rollno):\r\n self.name=name\r\n self.rollno=rollno\r\nstd1=Student(\"Siva\",123)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
tp = 1, 2, 3
print(tp + (4,))
|
normal
|
{
"blob_id": "8e9db58488f6ee8aa0d521a19d9d89504d119076",
"index": 6689,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(tp + (4,))\n",
"step-3": "tp = 1, 2, 3\nprint(tp + (4,))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
##This script looks at a path for a dated file, then parses it by row into two different files/folders based on fields being blank within each row.
import os.path
from datetime import date
##sets date variables/format
today = date.today()
todayFormatted = today.strftime("%m%d%Y")
print(todayFormatted)
##Sets variable for the base file name
basefilename = "PartOfFileNameYouAreLookingFor"
basepath = '\\\\Test/Path/Base/'
type1_path = '\\\\Test/Path/Base/Type1'
type2_path = '\\\\Test/Path/Base/Type2'
filename = basefilename + todayFormatted + '.txt'
os.chdir(basepath)
if not os.path.isfile(filename):
    print('File does not exist.')
else:
    with open(filename) as f:
        content = f.read().splitlines()
def parse():
    for line in content:
        if line[40:60] != " ": ##This usecase looks for a specific field/position in a file row to be blank
            ## print(line[40:60])
            os.chdir(type1_path)
            open('Type1Results.txt', 'a').write(line + '\n')
        elif line[40:60] == " ":
            os.chdir(type2_path)
            open('Type2Results.txt', 'a').write(line + '\n')
parse()
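
# Behavioural note (added for clarity, not in the original script): line[40:60] is a
# 20-character slice, but it is compared against a single space (' '). As written, a row
# reaches Type2Results.txt only when that slice is literally the one-character string ' '
# (i.e. the row ends at column 41); any other content, including a run of 20 spaces,
# fails the == ' ' test and falls into the Type1 branch. If "blank" is meant to cover a
# field of spaces, a check such as line[40:60].strip() == '' may be closer to the intent.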
|
normal
|
{
"blob_id": "07a546928df1acfedf7a7735dc813de9da8373e0",
"index": 1275,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(todayFormatted)\n<mask token>\nos.chdir(basepath)\nif not os.path.isfile(filename):\n print('File does not exist.')\nelse:\n with open(filename) as f:\n content = f.read().splitlines()\n\n\ndef parse():\n for line in content:\n if line[40:60] != ' ':\n os.chdir(type1_path)\n open('Type1Results.txt', 'a').write(line + '\\n')\n elif line[40:60] == ' ':\n os.chdir(type2_path)\n open('Type2Results.txt', 'a').write(line + '\\n')\n\n\nparse()\n",
"step-3": "<mask token>\ntoday = date.today()\ntodayFormatted = today.strftime('%m%d%Y')\nprint(todayFormatted)\nbasefilename = 'PartOfFileNameYouAreLookingFor'\nbasepath = '\\\\\\\\Test/Path/Base/'\ntype1_path = '\\\\\\\\Test/Path/Base/Type1'\ntype2_path = '\\\\\\\\Test/Path/Base/Type2'\nfilename = basefilename + todayFormatted + '.txt'\nos.chdir(basepath)\nif not os.path.isfile(filename):\n print('File does not exist.')\nelse:\n with open(filename) as f:\n content = f.read().splitlines()\n\n\ndef parse():\n for line in content:\n if line[40:60] != ' ':\n os.chdir(type1_path)\n open('Type1Results.txt', 'a').write(line + '\\n')\n elif line[40:60] == ' ':\n os.chdir(type2_path)\n open('Type2Results.txt', 'a').write(line + '\\n')\n\n\nparse()\n",
"step-4": "import os.path\nfrom datetime import date\ntoday = date.today()\ntodayFormatted = today.strftime('%m%d%Y')\nprint(todayFormatted)\nbasefilename = 'PartOfFileNameYouAreLookingFor'\nbasepath = '\\\\\\\\Test/Path/Base/'\ntype1_path = '\\\\\\\\Test/Path/Base/Type1'\ntype2_path = '\\\\\\\\Test/Path/Base/Type2'\nfilename = basefilename + todayFormatted + '.txt'\nos.chdir(basepath)\nif not os.path.isfile(filename):\n print('File does not exist.')\nelse:\n with open(filename) as f:\n content = f.read().splitlines()\n\n\ndef parse():\n for line in content:\n if line[40:60] != ' ':\n os.chdir(type1_path)\n open('Type1Results.txt', 'a').write(line + '\\n')\n elif line[40:60] == ' ':\n os.chdir(type2_path)\n open('Type2Results.txt', 'a').write(line + '\\n')\n\n\nparse()\n",
"step-5": "##This script looks at a path for a dated file, then parses it by row into two different files/folders based on fields being blank within each row.\r\n\r\nimport os.path\r\nfrom datetime import date\r\n\r\n##sets date variables/format\r\ntoday = date.today()\r\ntodayFormatted = today.strftime(\"%m%d%Y\")\r\nprint(todayFormatted)\r\n\r\n##Sets variable for the base file name \r\nbasefilename = \"PartOfFileNameYouAreLookingFor\"\r\n\r\n\r\nbasepath = '\\\\\\\\Test/Path/Base/'\r\ntype1_path = '\\\\\\\\Test/Path/Base/Type1'\r\ntype2_path = '\\\\\\\\Test/Path/Base/Type2'\r\n\r\nfilename = basefilename + todayFormatted + '.txt'\r\n\r\nos.chdir(basepath)\r\nif not os.path.isfile(filename):\r\n print('File does not exist.')\r\nelse:\r\n with open(filename) as f:\r\n content = f.read().splitlines()\r\n\r\ndef parse():\r\n for line in content:\r\n if line[40:60] != \" \": ##This usecase looks for a specific field/position in a file row to be blank\r\n## print(line[40:60])\r\n os.chdir(type1_path)\r\n open('Type1Results.txt', 'a').write(line + '\\n')\r\n elif line[40:60] == \" \":\r\n os.chdir(type2_path)\r\n open('Type2Results.txt', 'a').write(line + '\\n') \r\n\r\nparse()\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# coding=utf-8
# flake8:noqa
from .string_helper import (
    camelize, uncamelize,
    camelize_for_dict_key, camelize_for_dict_key_in_list,
    uncamelize_for_dict_key, uncamelize_for_dict_key_in_list
)
from .datetime_helper import datetime_format
from .class_helper import override
from .paginate import paginate2dict
from .json_type import JsonType
from .request import RequestDict
from .response import ResponseJson
from .api_helper import gen_links, gen_pagination, sort_list
from .api_helper import eliminate_key, remain_key
|
normal
|
{
"blob_id": "64a590d31be98f7639034662b2a322e5572cc1ae",
"index": 3554,
"step-1": "<mask token>\n",
"step-2": "from .string_helper import camelize, uncamelize, camelize_for_dict_key, camelize_for_dict_key_in_list, uncamelize_for_dict_key, uncamelize_for_dict_key_in_list\nfrom .datetime_helper import datetime_format\nfrom .class_helper import override\nfrom .paginate import paginate2dict\nfrom .json_type import JsonType\nfrom .request import RequestDict\nfrom .response import ResponseJson\nfrom .api_helper import gen_links, gen_pagination, sort_list\nfrom .api_helper import eliminate_key, remain_key\n",
"step-3": "# coding=utf-8\n# flake8:noqa\n\nfrom .string_helper import (\n camelize, uncamelize,\n camelize_for_dict_key, camelize_for_dict_key_in_list,\n uncamelize_for_dict_key, uncamelize_for_dict_key_in_list\n)\nfrom .datetime_helper import datetime_format\nfrom .class_helper import override\n\nfrom .paginate import paginate2dict\nfrom .json_type import JsonType\nfrom .request import RequestDict\nfrom .response import ResponseJson\nfrom .api_helper import gen_links, gen_pagination, sort_list\nfrom .api_helper import eliminate_key, remain_key\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.shortcuts import render_to_response
from mousedb.animal.models import Animal, Strain
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.db import connection
import datetime
@login_required
def todo(request):
    eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True).order_by('Strain','Background','Rack','Cage')
    genotype_list = Animal.objects.filter(Genotype="N.D.", Alive=True).exclude(Strain__Strain="C57BL/6").order_by('Strain','Background','Rack','Cage')
    wean = datetime.date.today() - datetime.timedelta(days=30)
    wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,Alive=True).exclude(Strain=2).order_by('Strain','Background','Rack','Cage')
    return render_to_response('todo.html', {'eartag_list':eartag_list, 'wean_list':wean_list, 'genotype_list':genotype_list},context_instance=RequestContext(request))
@login_required
def home(request):
    cursor = connection.cursor()
    cage_list = Animal.objects.values("Cage")
    cage_list_current = Animal.objects.filter(Alive=True).values("Cage")
    animal_list = Animal.objects.all()
    animal_list_current = Animal.objects.filter(Alive=True)
    strain_list = Strain.objects.all()
    strain_list_current = Strain.objects.filter(animal__Alive=True)
    return render_to_response('home.html', {'animal_list':animal_list, 'animal_list_current':animal_list_current, 'strain_list':strain_list, 'strain_list_current':strain_list_current, 'cage_list':cage_list, 'cage_list_current':cage_list_current},context_instance=RequestContext(request))
|
normal
|
{
"blob_id": "89518f43934710ef2e7471a91128e20d2306d6f6",
"index": 9291,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@login_required\ndef todo(request):\n eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True\n ).order_by('Strain', 'Background', 'Rack', 'Cage')\n genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude(\n Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack',\n 'Cage')\n wean = datetime.date.today() - datetime.timedelta(days=30)\n wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,\n Alive=True).exclude(Strain=2).order_by('Strain', 'Background',\n 'Rack', 'Cage')\n return render_to_response('todo.html', {'eartag_list': eartag_list,\n 'wean_list': wean_list, 'genotype_list': genotype_list},\n context_instance=RequestContext(request))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@login_required\ndef todo(request):\n eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True\n ).order_by('Strain', 'Background', 'Rack', 'Cage')\n genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude(\n Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack',\n 'Cage')\n wean = datetime.date.today() - datetime.timedelta(days=30)\n wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,\n Alive=True).exclude(Strain=2).order_by('Strain', 'Background',\n 'Rack', 'Cage')\n return render_to_response('todo.html', {'eartag_list': eartag_list,\n 'wean_list': wean_list, 'genotype_list': genotype_list},\n context_instance=RequestContext(request))\n\n\n@login_required\ndef home(request):\n cursor = connection.cursor()\n cage_list = Animal.objects.values('Cage')\n cage_list_current = Animal.objects.filter(Alive=True).values('Cage')\n animal_list = Animal.objects.all()\n animal_list_current = Animal.objects.filter(Alive=True)\n strain_list = Strain.objects.all()\n strain_list_current = Strain.objects.filter(animal__Alive=True)\n return render_to_response('home.html', {'animal_list': animal_list,\n 'animal_list_current': animal_list_current, 'strain_list':\n strain_list, 'strain_list_current': strain_list_current,\n 'cage_list': cage_list, 'cage_list_current': cage_list_current},\n context_instance=RequestContext(request))\n",
"step-4": "from django.shortcuts import render_to_response\nfrom mousedb.animal.models import Animal, Strain\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom django.db import connection\nimport datetime\n\n\n@login_required\ndef todo(request):\n eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True\n ).order_by('Strain', 'Background', 'Rack', 'Cage')\n genotype_list = Animal.objects.filter(Genotype='N.D.', Alive=True).exclude(\n Strain__Strain='C57BL/6').order_by('Strain', 'Background', 'Rack',\n 'Cage')\n wean = datetime.date.today() - datetime.timedelta(days=30)\n wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,\n Alive=True).exclude(Strain=2).order_by('Strain', 'Background',\n 'Rack', 'Cage')\n return render_to_response('todo.html', {'eartag_list': eartag_list,\n 'wean_list': wean_list, 'genotype_list': genotype_list},\n context_instance=RequestContext(request))\n\n\n@login_required\ndef home(request):\n cursor = connection.cursor()\n cage_list = Animal.objects.values('Cage')\n cage_list_current = Animal.objects.filter(Alive=True).values('Cage')\n animal_list = Animal.objects.all()\n animal_list_current = Animal.objects.filter(Alive=True)\n strain_list = Strain.objects.all()\n strain_list_current = Strain.objects.filter(animal__Alive=True)\n return render_to_response('home.html', {'animal_list': animal_list,\n 'animal_list_current': animal_list_current, 'strain_list':\n strain_list, 'strain_list_current': strain_list_current,\n 'cage_list': cage_list, 'cage_list_current': cage_list_current},\n context_instance=RequestContext(request))\n",
"step-5": "from django.shortcuts import render_to_response\nfrom mousedb.animal.models import Animal, Strain\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom django.db import connection\nimport datetime\n\n@login_required\ndef todo(request):\n\teartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True).order_by('Strain','Background','Rack','Cage')\n\tgenotype_list = Animal.objects.filter(Genotype=\"N.D.\", Alive=True).exclude(Strain__Strain=\"C57BL/6\").order_by('Strain','Background','Rack','Cage')\n\twean = datetime.date.today() - datetime.timedelta(days=30)\n\twean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,Alive=True).exclude(Strain=2).order_by('Strain','Background','Rack','Cage')\n\treturn render_to_response('todo.html', {'eartag_list':eartag_list, 'wean_list':wean_list, 'genotype_list':genotype_list},context_instance=RequestContext(request))\n\n@login_required\ndef home(request):\n\tcursor = connection.cursor()\n\tcage_list = Animal.objects.values(\"Cage\")\n\tcage_list_current = Animal.objects.filter(Alive=True).values(\"Cage\")\n\tanimal_list = Animal.objects.all()\n\tanimal_list_current = Animal.objects.filter(Alive=True)\n\tstrain_list = Strain.objects.all()\n\tstrain_list_current = Strain.objects.filter(animal__Alive=True)\n\treturn render_to_response('home.html', {'animal_list':animal_list, 'animal_list_current':animal_list_current, 'strain_list':strain_list, 'strain_list_current':strain_list_current, 'cage_list':cage_list, 'cage_list_current':cage_list_current},context_instance=RequestContext(request))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def fibonacci(num):
    f_1 = 0
    f_2 = 1
    answer = 0
    for i in range(num-1):
        answer = f_1 + f_2
        f_1 = f_2
        f_2 = answer
    return answer
# The code below is only for test output.
print(fibonacci(3))
|
normal
|
{
"blob_id": "c3d0a9bdbfd5b6f2b960ee2c1f11ec4acf508310",
"index": 8458,
"step-1": "<mask token>\n",
"step-2": "def fibonacci(num):\n f_1 = 0\n f_2 = 1\n answer = 0\n for i in range(num - 1):\n answer = f_1 + f_2\n f_1 = f_2\n f_2 = answer\n return answer\n\n\n<mask token>\n",
"step-3": "def fibonacci(num):\n f_1 = 0\n f_2 = 1\n answer = 0\n for i in range(num - 1):\n answer = f_1 + f_2\n f_1 = f_2\n f_2 = answer\n return answer\n\n\nprint(fibonacci(3))\n",
"step-4": "def fibonacci(num):\n f_1 = 0\n f_2 = 1\n answer = 0\n for i in range(num-1):\n answer = f_1 + f_2\n f_1 = f_2\n f_2 = answer\n return answer\n\n# 아래는 테스트로 출력해 보기 위한 코드입니다.\nprint(fibonacci(3))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
    Naive Bayes Class
    - Bernoulli Naive Bayes
    - Multinomial Naive Bayes
    - Gaussian Naive Bayes
    Author: Zhenhuan(Steven) Sun
"""
import numpy as np
class BernoulliNB:
    def __init__(self, k=1.0, binarize=0.0):
        # Laplace Smoothing Factor
        self.K = k
        # the degree of binarization
        self.binarize = binarize
    def fit(self, X, y):
        # binarize X
        # since we assume data is bernoulli distributed we need to make sure
        # that data consist of binary values
        X = self._binarize(X)
        # separate training data by classes(different target)
        X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
        # number of different class
        self.n_classes = len(np.unique(y))
        # count the number of examples and number of features in X
        self.n_examples, self.n_features = X.shape
        # count the number of examples that belong to class k (0 or 1 in spam classification)
        prior_numerator = np.array([len(x) for x in X_separated_by_class])
        # compute the prior probability (P(y))
        self.prior_prob = prior_numerator / self.n_examples
        # compute the log prior probability (log(P(y))) for prediction
        self.log_prior_prob = np.log(self.prior_prob)
        # compute the conditional probability
        # with laplace smoothing we assume we have seen each feature at least self.K times
        conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])
        conditional_prob_denominator = np.expand_dims(np.array([len(x) + 2 * self.K for x in X_separated_by_class]), axis=1)
        self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator
        return self
    def predict(self, X):
        # binarize X
        X = self._binarize(X)
        # compute log posterior probability log(P(y|X))
        posterior_prob_numerator = np.array([(x * np.log(self.conditional_prob) +
                                              np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +
                                             self.log_prior_prob for x in X])
        posterior_prob_denominator = np.expand_dims(np.array([(x * np.log(self.conditional_prob) +
                                                               np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +
                                                              self.log_prior_prob for x in X]).sum(axis=1), axis=1)
        posterior_prob = posterior_prob_numerator - posterior_prob_denominator
        # alternative solution
        # since posterior_prob_denominator is a constant thus we don't bother compute the denominator
        # compute the numerator is sufficient enough to make prediction and also it makes algorithm runs faster
        #return np.argmax(posterior_prob_numerator, axis=1)
        return np.argmax(posterior_prob, axis=1)
    def _binarize(self, X):
        # convert the values in X to binary values (0 or 1)
        return np.where(X > self.binarize, 1, 0)
class MultinomialNB:
    def __init__(self, k=1.0):
        # Laplace Smoothing Factor
        self.K = k
    def fit(self, X, y):
        # separate the training data by class
        X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
        # number of different class
        self.n_classes = len(np.unique(y))
        # count the number of examples that belong to different classes
        prior_numerator = [len(x) for x in X_separated_by_class]
        # count the total number of examples in the training set
        prior_denominator = X.shape[0]
        # compute prior probability
        self.prior_prob = np.array(prior_numerator) / prior_denominator
        # compute log prior probability for prediction
        self.log_prior_prob = np.log(self.prior_prob)
        # compute the conditional probability's numerator for different class (with laplace smoothing)
        # assume we have seen each feature at least once to avoid divide by zero error
        conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])
        # compute the conditional probability's denominator for different class
        conditional_prob_denominator = np.expand_dims(conditional_prob_numerator.sum(axis=1), axis=1)
        # compute the conditional probability for each feature and for each different classes
        self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator
        return self
    def predict(self, X):
        # compute the log conditional probability for each examples and for each different classes
        log_conditional_prob = np.array([(x * np.log(self.conditional_prob)).sum(axis=1) for x in X])
        # compute the posterior probability
        posterior_pronb = log_conditional_prob + self.log_prior_prob
        # make prediction
        return np.argmax(posterior_pronb, axis=1)
class GaussianNB:
    def __init__(self, k=1.0):
        # Laplace Smoothing Factor
        self.K = k
    def fit(self, X, y):
        # separate the training set by classes
        X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
        # count the number of different classes
        self.n_classes = len(np.unique(y))
        # compute prior probability
        self.prior_prob = np.array([len(x) / X.shape[0] for x in X_separated_by_class])
        # compute mean vector for each class
        self.mean_vector = np.array([np.array(x).sum(axis=0) / len(x) for x in X_separated_by_class])
        # compute covariance matrix for each class
        covariance_diagonal_matrices = []
        for c, x in enumerate(X_separated_by_class):
            mean_square_difference = 0
            for x_i in x:
                # compute the covariance matrix for each examples (slow as hell -> abandoned)
                # mean_difference = np.expand_dims((x_i - self.mean_vector[c]), axis=1)
                # mean_square_difference += mean_difference.dot(mean_difference.T)
                # compute the diagnal entries of covariance matrix for each examples (much faster than above method)
                mean_difference = x_i - self.mean_vector[c]
                mean_square_difference += mean_difference ** 2
            # convert the list of diagonal entries back to covariance diagonal matrix
            # here we assumed that the mean square difference between each feature and its mean is at least 1 to make sure that
            # there is no zero variance in the covariance matrix and thus we won't encounter divide by zero error in the future
            covariance_diagonal_matrix = ((mean_square_difference + self.K) / len(x)) * np.identity(X.shape[1])
            covariance_diagonal_matrices.append(covariance_diagonal_matrix)
        self.covariance_diagonal_matrices = np.asarray(covariance_diagonal_matrices)
        return self
    def log_gaussian_distribution(self, x, mean, variance):
        log_multiplier = -np.log(np.sqrt((2 * np.pi) * variance))
        log_exponent = -(x - mean)**2 / (2 * variance)
        return sum(log_multiplier + log_exponent)
    def predict(self, X):
        variances = []
        for matrix in self.covariance_diagonal_matrices:
            variance = matrix.diagonal()
            variances.append(variance)
        variances = np.array(variances)
        # list that stores all test data's posterior probability
        posterior_prob_collection = []
        for x in X:
            conditional_prob = []
            for mean, variance in zip(self.mean_vector, variances):
                # compute conditional probability for each class
                conditional_prob.append(self.log_gaussian_distribution(x, mean, variance))
            # compute posterior probability
            posterior_prob = np.array(conditional_prob) + np.log(self.prior_prob)
            posterior_prob_collection.append(posterior_prob)
        posterior_prob_collection = np.array(posterior_prob_collection)
        return np.argmax(posterior_prob_collection, axis=1)
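
# Hedged usage sketch (not part of the original module): a quick smoke test of the three
# classifiers on a tiny made-up dataset. The arrays below are illustrative only; all
# three classes expose the same fit/predict interface and fit() returns self, so the
# calls can be chained.
if __name__ == "__main__":
    X_toy = np.array([[1.0, 0.0, 2.0],
                      [0.0, 1.0, 0.0],
                      [3.0, 0.0, 1.0],
                      [0.0, 2.0, 0.0]])
    y_toy = np.array([0, 1, 0, 1])
    for model in (BernoulliNB(), MultinomialNB(), GaussianNB()):
        preds = model.fit(X_toy, y_toy).predict(X_toy)
        print(model.__class__.__name__, preds)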
|
normal
|
{
"blob_id": "5dfe86d654e4184bab4401f8b634326996e42e9c",
"index": 2646,
"step-1": "<mask token>\n\n\nclass MultinomialNB:\n <mask token>\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-2": "<mask token>\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-3": "<mask token>\n\n\nclass BernoulliNB:\n\n def __init__(self, k=1.0, binarize=0.0):\n self.K = k\n self.binarize = binarize\n\n def fit(self, X, y):\n X = self._binarize(X)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.n_examples, self.n_features = X.shape\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n self.prior_prob = prior_numerator / self.n_examples\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([(len(x) + 2 *\n self.K) for x in X_separated_by_class]), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n X = self._binarize(X)\n posterior_prob_numerator = np.array([((x * np.log(self.\n conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in X])\n posterior_prob_denominator = np.expand_dims(np.array([((x * np.log(\n self.conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in\n X]).sum(axis=1), axis=1)\n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n return np.where(X > self.binarize, 1, 0)\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return sum(log_multiplier + 
log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-4": "<mask token>\nimport numpy as np\n\n\nclass BernoulliNB:\n\n def __init__(self, k=1.0, binarize=0.0):\n self.K = k\n self.binarize = binarize\n\n def fit(self, X, y):\n X = self._binarize(X)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.n_examples, self.n_features = X.shape\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n self.prior_prob = prior_numerator / self.n_examples\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([(len(x) + 2 *\n self.K) for x in X_separated_by_class]), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n X = self._binarize(X)\n posterior_prob_numerator = np.array([((x * np.log(self.\n conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in X])\n posterior_prob_denominator = np.expand_dims(np.array([((x * np.log(\n self.conditional_prob) + np.abs(1 - x) * np.log(1 - self.\n conditional_prob)).sum(axis=1) + self.log_prior_prob) for x in\n X]).sum(axis=1), axis=1)\n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n return np.where(X > self.binarize, 1, 0)\n\n\nclass MultinomialNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n prior_numerator = [len(x) for x in X_separated_by_class]\n prior_denominator = X.shape[0]\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n self.log_prior_prob = np.log(self.prior_prob)\n conditional_prob_numerator = np.array([(np.array(x).sum(axis=0) +\n self.K) for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(\n conditional_prob_numerator.sum(axis=1), axis=1)\n self.conditional_prob = (conditional_prob_numerator /\n conditional_prob_denominator)\n return self\n\n def predict(self, X):\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)\n ).sum(axis=1) for x in X])\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n return np.argmax(posterior_pronb, axis=1)\n\n\nclass GaussianNB:\n\n def __init__(self, k=1.0):\n self.K = k\n\n def fit(self, X, y):\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in\n np.unique(y)]\n self.n_classes = len(np.unique(y))\n self.prior_prob = np.array([(len(x) / X.shape[0]) for x in\n X_separated_by_class])\n self.mean_vector = np.array([(np.array(x).sum(axis=0) / len(x)) for\n x in X_separated_by_class])\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n covariance_diagonal_matrix = (mean_square_difference + self.K\n ) / len(x) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(\n covariance_diagonal_matrices)\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n log_multiplier = -np.log(np.sqrt(2 * np.pi * variance))\n log_exponent = -(x - mean) ** 2 / (2 * variance)\n return 
sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n conditional_prob.append(self.log_gaussian_distribution(x,\n mean, variance))\n posterior_prob = np.array(conditional_prob) + np.log(self.\n prior_prob)\n posterior_prob_collection.append(posterior_prob)\n posterior_prob_collection = np.array(posterior_prob_collection)\n return np.argmax(posterior_prob_collection, axis=1)\n",
"step-5": "\"\"\"\n Naive Bayes Class\n - Bernoulli Naive Bayes\n - Multinomial Naive Bayes\n - Gaussian Naive Bayes\n Arthor: Zhenhuan(Steven) Sun\n\"\"\"\n\nimport numpy as np\n\nclass BernoulliNB:\n def __init__(self, k=1.0, binarize=0.0):\n # Laplace Smoothing Factor\n self.K = k\n\n # the degree of binarization\n self.binarize = binarize\n\n def fit(self, X, y):\n # binarize X\n # since we assume data is bernoulli distributed we need to make sure\n # that data consist of binary values\n X = self._binarize(X)\n\n # separate training data by classes(different target)\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n\n # number of different class\n self.n_classes = len(np.unique(y))\n\n # count the number of examples and number of features in X\n self.n_examples, self.n_features = X.shape\n\n # count the number of examples that belong to class k (0 or 1 in spam classification)\n prior_numerator = np.array([len(x) for x in X_separated_by_class])\n\n # compute the prior probability (P(y))\n self.prior_prob = prior_numerator / self.n_examples\n\n # compute the log prior probability (log(P(y))) for prediction\n self.log_prior_prob = np.log(self.prior_prob)\n\n # compute the conditional probability\n # with laplace smoothing we assume we have seen each feature at least self.K times\n conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])\n conditional_prob_denominator = np.expand_dims(np.array([len(x) + 2 * self.K for x in X_separated_by_class]), axis=1)\n self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator\n\n return self\n\n def predict(self, X):\n # binarize X\n X = self._binarize(X)\n\n # compute log posterior probability log(P(y|X))\n posterior_prob_numerator = np.array([(x * np.log(self.conditional_prob) + \n np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) + \n self.log_prior_prob for x in X])\n posterior_prob_denominator = np.expand_dims(np.array([(x * np.log(self.conditional_prob) + \n np.abs(1 - x) * np.log(1 - self.conditional_prob)).sum(axis=1) +\n self.log_prior_prob for x in X]).sum(axis=1), axis=1)\n \n posterior_prob = posterior_prob_numerator - posterior_prob_denominator\n\n # alternative solution\n # since posterior_prob_denominator is a constant thus we don't bother compute the denominator\n # compute the numerator is sufficient enough to make prediction and also it makes algorithm runs faster\n #return np.argmax(posterior_prob_numerator, axis=1)\n\n return np.argmax(posterior_prob, axis=1)\n\n def _binarize(self, X):\n # convert the values in X to binary values (0 or 1)\n return np.where(X > self.binarize, 1, 0)\n\nclass MultinomialNB:\n def __init__(self, k=1.0):\n # Laplace Smoothing Factor\n self.K = k\n\n def fit(self, X, y):\n # separate the training data by class\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n \n # number of different class\n self.n_classes = len(np.unique(y))\n\n # count the number of examples that belong to different classes\n prior_numerator = [len(x) for x in X_separated_by_class]\n\n # count the total number of examples in the training set\n prior_denominator = X.shape[0]\n\n # compute prior probability\n self.prior_prob = np.array(prior_numerator) / prior_denominator\n\n # compute log prior probability for prediction\n self.log_prior_prob = np.log(self.prior_prob)\n\n # compute the conditional probability's numerator for different class (with laplace smoothing)\n # assume we 
have seen each feature at least once to avoid divide by zero error\n conditional_prob_numerator = np.array([np.array(x).sum(axis=0) + self.K for x in X_separated_by_class])\n\n # compute the conditional probability's denominator for different class\n conditional_prob_denominator = np.expand_dims(conditional_prob_numerator.sum(axis=1), axis=1)\n\n # compute the conditional probability for each feature and for each different classes\n self.conditional_prob = conditional_prob_numerator / conditional_prob_denominator\n\n return self\n\n def predict(self, X):\n # compute the log conditional probability for each examples and for each different classes\n log_conditional_prob = np.array([(x * np.log(self.conditional_prob)).sum(axis=1) for x in X])\n\n # compute the posterior probability\n posterior_pronb = log_conditional_prob + self.log_prior_prob\n\n # make prediction\n return np.argmax(posterior_pronb, axis=1)\n\nclass GaussianNB:\n def __init__(self, k=1.0):\n # Laplace Smoothing Factor\n self.K = k\n\n def fit(self, X, y):\n # separate the training set by classes\n X_separated_by_class = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]\n\n # count the number of different classes\n self.n_classes = len(np.unique(y))\n\n # compute prior probability\n self.prior_prob = np.array([len(x) / X.shape[0] for x in X_separated_by_class])\n\n # compute mean vector for each class\n self.mean_vector = np.array([np.array(x).sum(axis=0) / len(x) for x in X_separated_by_class])\n\n # compute covariance matrix for each class\n covariance_diagonal_matrices = []\n for c, x in enumerate(X_separated_by_class):\n mean_square_difference = 0\n for x_i in x:\n # compute the covariance matrix for each examples (slow as hell -> abandoned)\n # mean_difference = np.expand_dims((x_i - self.mean_vector[c]), axis=1)\n # mean_square_difference += mean_difference.dot(mean_difference.T) \n # compute the diagnal entries of covariance matrix for each examples (much faster than above method)\n mean_difference = x_i - self.mean_vector[c]\n mean_square_difference += mean_difference ** 2\n # convert the list of diagonal entries back to covariance diagonal matrix\n # here we assumed that the mean square difference between each feature and its mean is at least 1 to make sure that \n # there is no zero variance in the covariance matrix and thus we won't encounter divide by zero error in the future\n covariance_diagonal_matrix = ((mean_square_difference + self.K) / len(x)) * np.identity(X.shape[1])\n covariance_diagonal_matrices.append(covariance_diagonal_matrix)\n self.covariance_diagonal_matrices = np.asarray(covariance_diagonal_matrices)\n\n return self\n\n def log_gaussian_distribution(self, x, mean, variance):\n\n log_multiplier = -np.log(np.sqrt((2 * np.pi) * variance))\n log_exponent = -(x - mean)**2 / (2 * variance)\n\n return sum(log_multiplier + log_exponent)\n\n def predict(self, X):\n variances = []\n for matrix in self.covariance_diagonal_matrices:\n variance = matrix.diagonal()\n variances.append(variance)\n variances = np.array(variances)\n \n # list that stores all test data's posterior probability\n posterior_prob_collection = []\n for x in X:\n conditional_prob = []\n for mean, variance in zip(self.mean_vector, variances):\n # compute conditional probability for each class\n conditional_prob.append(self.log_gaussian_distribution(x, mean, variance))\n # compute posterior probability\n posterior_prob = np.array(conditional_prob) + np.log(self.prior_prob)\n posterior_prob_collection.append(posterior_prob)\n 
posterior_prob_collection = np.array(posterior_prob_collection)\n \n return np.argmax(posterior_prob_collection, axis=1)",
"step-ids": [
8,
9,
14,
15,
16
]
}
|
[
8,
9,
14,
15,
16
] |
from scheme import *
from tests.util import *
class TestDateTime(FieldTestCase):
    def test_instantiation(self):
        with self.assertRaises(TypeError):
            DateTime(minimum=True)
        with self.assertRaises(TypeError):
            DateTime(maximum=True)
    def test_processing(self):
        field = DateTime()
        self.assert_processed(field, None)
        self.assert_not_processed(field, 'invalid', True)
        now = datetime.now().replace(microsecond=0)
        now_local = now.replace(tzinfo=LOCAL)
        now_utc = now_local.astimezone(UTC)
        now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')
        self.assertEqual(field.process(now_text, INBOUND, True), now_local)
        self.assertEqual(field.process(now, OUTBOUND, True), now_text)
        self.assertEqual(field.process(now_local, OUTBOUND, True), now_text)
        self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)
    def test_utc_processing(self):
        field = DateTime(utc=True)
        self.assert_processed(field, None)
        self.assert_not_processed(field, 'invalid', True)
        now = datetime.utcnow().replace(microsecond=0)
        now_utc = now.replace(tzinfo=UTC)
        now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')
        self.assertEqual(field.process(now_text, INBOUND, True), now_utc)
        self.assertEqual(field.process(now, OUTBOUND, True), now_text)
        self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)
    def test_minimum(self):
        now, now_text = construct_now()
        for field in (DateTime(minimum=now), DateTime(minimum=now_text)):
            self.assertEqual(field.minimum, now)
            self.assert_processed(field, (now, now_text), (now, now_text))
            self.assert_processed(field, (now, now_text), construct_now(+1))
            self.assert_not_processed(field, 'minimum', construct_now(-60))
    def test_maximum(self):
        now, now_text = construct_now()
        for field in (DateTime(maximum=now), DateTime(maximum=now_text)):
            self.assertEqual(field.maximum, now)
            self.assert_processed(field, (now, now_text), (now, now_text))
            self.assert_processed(field, (now, now_text), construct_now(-60))
            self.assert_not_processed(field, 'maximum', construct_now(+60))
    def test_interpolation(self):
        field = DateTime()
        now = datetime.now()
        self.assert_interpolated(field, None, now)
        self.assert_interpolated(field, ('${value}', now), value=now)
    def test_description(self):
        now_text = '2012-01-01T00:00:00Z'
        field = DateTime(name='test', utc=True, minimum=now_text)
        self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':
            'test', 'minimum': now_text, 'utc': True})
        field = DateTime(name='test', utc=True, maximum=now_text)
        self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':
            'test', 'maximum': now_text, 'utc': True})
        field = DateTime(name='test', utc=True, minimum=now_text, maximum=
            now_text)
        self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':
            'test', 'minimum': now_text, 'maximum': now_text, 'utc': True})
|
normal
|
{
"blob_id": "92b22ea23ad0cf4e16c7d19d055b7ec152ca433a",
"index": 5191,
"step-1": "<mask token>\n\n\nclass TestDateTime(FieldTestCase):\n <mask token>\n <mask token>\n\n def test_utc_processing(self):\n field = DateTime(utc=True)\n self.assert_processed(field, None)\n self.assert_not_processed(field, 'invalid', True)\n now = datetime.utcnow().replace(microsecond=0)\n now_utc = now.replace(tzinfo=UTC)\n now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')\n self.assertEqual(field.process(now_text, INBOUND, True), now_utc)\n self.assertEqual(field.process(now, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)\n <mask token>\n\n def test_maximum(self):\n now, now_text = construct_now()\n for field in (DateTime(maximum=now), DateTime(maximum=now_text)):\n self.assertEqual(field.maximum, now)\n self.assert_processed(field, (now, now_text), (now, now_text))\n self.assert_processed(field, (now, now_text), construct_now(-60))\n self.assert_not_processed(field, 'maximum', construct_now(+60))\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDateTime(FieldTestCase):\n <mask token>\n\n def test_processing(self):\n field = DateTime()\n self.assert_processed(field, None)\n self.assert_not_processed(field, 'invalid', True)\n now = datetime.now().replace(microsecond=0)\n now_local = now.replace(tzinfo=LOCAL)\n now_utc = now_local.astimezone(UTC)\n now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')\n self.assertEqual(field.process(now_text, INBOUND, True), now_local)\n self.assertEqual(field.process(now, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_local, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)\n\n def test_utc_processing(self):\n field = DateTime(utc=True)\n self.assert_processed(field, None)\n self.assert_not_processed(field, 'invalid', True)\n now = datetime.utcnow().replace(microsecond=0)\n now_utc = now.replace(tzinfo=UTC)\n now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')\n self.assertEqual(field.process(now_text, INBOUND, True), now_utc)\n self.assertEqual(field.process(now, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)\n\n def test_minimum(self):\n now, now_text = construct_now()\n for field in (DateTime(minimum=now), DateTime(minimum=now_text)):\n self.assertEqual(field.minimum, now)\n self.assert_processed(field, (now, now_text), (now, now_text))\n self.assert_processed(field, (now, now_text), construct_now(+1))\n self.assert_not_processed(field, 'minimum', construct_now(-60))\n\n def test_maximum(self):\n now, now_text = construct_now()\n for field in (DateTime(maximum=now), DateTime(maximum=now_text)):\n self.assertEqual(field.maximum, now)\n self.assert_processed(field, (now, now_text), (now, now_text))\n self.assert_processed(field, (now, now_text), construct_now(-60))\n self.assert_not_processed(field, 'maximum', construct_now(+60))\n\n def test_interpolation(self):\n field = DateTime()\n now = datetime.now()\n self.assert_interpolated(field, None, now)\n self.assert_interpolated(field, ('${value}', now), value=now)\n\n def test_description(self):\n now_text = '2012-01-01T00:00:00Z'\n field = DateTime(name='test', utc=True, minimum=now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'minimum': now_text, 'utc': True})\n field = DateTime(name='test', utc=True, maximum=now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'maximum': now_text, 'utc': True})\n field = DateTime(name='test', utc=True, minimum=now_text, maximum=\n now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'minimum': now_text, 'maximum': now_text, 'utc': True})\n",
"step-3": "<mask token>\n\n\nclass TestDateTime(FieldTestCase):\n\n def test_instantiation(self):\n with self.assertRaises(TypeError):\n DateTime(minimum=True)\n with self.assertRaises(TypeError):\n DateTime(maximum=True)\n\n def test_processing(self):\n field = DateTime()\n self.assert_processed(field, None)\n self.assert_not_processed(field, 'invalid', True)\n now = datetime.now().replace(microsecond=0)\n now_local = now.replace(tzinfo=LOCAL)\n now_utc = now_local.astimezone(UTC)\n now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')\n self.assertEqual(field.process(now_text, INBOUND, True), now_local)\n self.assertEqual(field.process(now, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_local, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)\n\n def test_utc_processing(self):\n field = DateTime(utc=True)\n self.assert_processed(field, None)\n self.assert_not_processed(field, 'invalid', True)\n now = datetime.utcnow().replace(microsecond=0)\n now_utc = now.replace(tzinfo=UTC)\n now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')\n self.assertEqual(field.process(now_text, INBOUND, True), now_utc)\n self.assertEqual(field.process(now, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)\n\n def test_minimum(self):\n now, now_text = construct_now()\n for field in (DateTime(minimum=now), DateTime(minimum=now_text)):\n self.assertEqual(field.minimum, now)\n self.assert_processed(field, (now, now_text), (now, now_text))\n self.assert_processed(field, (now, now_text), construct_now(+1))\n self.assert_not_processed(field, 'minimum', construct_now(-60))\n\n def test_maximum(self):\n now, now_text = construct_now()\n for field in (DateTime(maximum=now), DateTime(maximum=now_text)):\n self.assertEqual(field.maximum, now)\n self.assert_processed(field, (now, now_text), (now, now_text))\n self.assert_processed(field, (now, now_text), construct_now(-60))\n self.assert_not_processed(field, 'maximum', construct_now(+60))\n\n def test_interpolation(self):\n field = DateTime()\n now = datetime.now()\n self.assert_interpolated(field, None, now)\n self.assert_interpolated(field, ('${value}', now), value=now)\n\n def test_description(self):\n now_text = '2012-01-01T00:00:00Z'\n field = DateTime(name='test', utc=True, minimum=now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'minimum': now_text, 'utc': True})\n field = DateTime(name='test', utc=True, maximum=now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'maximum': now_text, 'utc': True})\n field = DateTime(name='test', utc=True, minimum=now_text, maximum=\n now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'minimum': now_text, 'maximum': now_text, 'utc': True})\n",
"step-4": "from scheme import *\nfrom tests.util import *\n\n\nclass TestDateTime(FieldTestCase):\n\n def test_instantiation(self):\n with self.assertRaises(TypeError):\n DateTime(minimum=True)\n with self.assertRaises(TypeError):\n DateTime(maximum=True)\n\n def test_processing(self):\n field = DateTime()\n self.assert_processed(field, None)\n self.assert_not_processed(field, 'invalid', True)\n now = datetime.now().replace(microsecond=0)\n now_local = now.replace(tzinfo=LOCAL)\n now_utc = now_local.astimezone(UTC)\n now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')\n self.assertEqual(field.process(now_text, INBOUND, True), now_local)\n self.assertEqual(field.process(now, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_local, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)\n\n def test_utc_processing(self):\n field = DateTime(utc=True)\n self.assert_processed(field, None)\n self.assert_not_processed(field, 'invalid', True)\n now = datetime.utcnow().replace(microsecond=0)\n now_utc = now.replace(tzinfo=UTC)\n now_text = now_utc.strftime('%Y-%m-%dT%H:%M:%SZ')\n self.assertEqual(field.process(now_text, INBOUND, True), now_utc)\n self.assertEqual(field.process(now, OUTBOUND, True), now_text)\n self.assertEqual(field.process(now_utc, OUTBOUND, True), now_text)\n\n def test_minimum(self):\n now, now_text = construct_now()\n for field in (DateTime(minimum=now), DateTime(minimum=now_text)):\n self.assertEqual(field.minimum, now)\n self.assert_processed(field, (now, now_text), (now, now_text))\n self.assert_processed(field, (now, now_text), construct_now(+1))\n self.assert_not_processed(field, 'minimum', construct_now(-60))\n\n def test_maximum(self):\n now, now_text = construct_now()\n for field in (DateTime(maximum=now), DateTime(maximum=now_text)):\n self.assertEqual(field.maximum, now)\n self.assert_processed(field, (now, now_text), (now, now_text))\n self.assert_processed(field, (now, now_text), construct_now(-60))\n self.assert_not_processed(field, 'maximum', construct_now(+60))\n\n def test_interpolation(self):\n field = DateTime()\n now = datetime.now()\n self.assert_interpolated(field, None, now)\n self.assert_interpolated(field, ('${value}', now), value=now)\n\n def test_description(self):\n now_text = '2012-01-01T00:00:00Z'\n field = DateTime(name='test', utc=True, minimum=now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'minimum': now_text, 'utc': True})\n field = DateTime(name='test', utc=True, maximum=now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'maximum': now_text, 'utc': True})\n field = DateTime(name='test', utc=True, minimum=now_text, maximum=\n now_text)\n self.assertEqual(field.describe(), {'fieldtype': 'datetime', 'name':\n 'test', 'minimum': now_text, 'maximum': now_text, 'utc': True})\n",
"step-5": null,
"step-ids": [
3,
7,
8,
9
]
}
|
[
3,
7,
8,
9
] |
# This file is part of the printrun suite.
#
# printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with printrun. If not, see <http://www.gnu.org/licenses/>.
import traceback
import logging
import wx
class NoViz:
showall = False
def clear(self, *a):
pass
def addfile_perlayer(self, gcode, showall = False):
layer_idx = 0
while layer_idx < len(gcode.all_layers):
yield layer_idx
layer_idx += 1
yield None
def addfile(self, *a, **kw):
pass
def addgcode(self, *a, **kw):
pass
def addgcodehighlight(self, *a, **kw):
pass
def Refresh(self, *a):
pass
def setlayer(self, *a):
pass
class NoVizWindow:
def __init__(self):
self.p = NoViz()
def Destroy(self):
pass
class VizPane(wx.BoxSizer):
def __init__(self, root, parentpanel = None):
super(VizPane, self).__init__(wx.VERTICAL)
if not parentpanel: parentpanel = root.panel
if root.settings.mainviz == "None":
root.gviz = NoViz()
root.gwindow = NoVizWindow()
return
use2dview = root.settings.mainviz == "2D"
if root.settings.mainviz == "3D":
try:
import printrun.gcview
root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel, root.build_dimensions_list, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))
root.gviz.clickcb = root.show_viz_window
except:
use2dview = True
logging.error("3D view mode requested, but we failed to initialize it.\n"
+ "Falling back to 2D view, and here is the backtrace:\n"
+ traceback.format_exc())
if use2dview:
from printrun import gviz
root.gviz = gviz.Gviz(parentpanel, (300, 300),
build_dimensions = root.build_dimensions_list,
grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width = root.settings.preview_extrusion_width,
bgcolor = root.bgcolor)
root.gviz.SetToolTip(wx.ToolTip(_("Click to examine / edit\n layers of loaded file")))
root.gviz.showall = 1
root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
use3dview = root.settings.viz3d
if use3dview:
try:
import printrun.gcview
objects = None
if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
objects = root.gviz.objects
root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.ID_ANY, 'Gcode view, shift to move view, mousewheel to set layer', size = (600, 600), build_dimensions = root.build_dimensions_list, objects = objects, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))
except:
use3dview = False
logging.error("3D view mode requested, but we failed to initialize it.\n"
+ "Falling back to 2D view, and here is the backtrace:\n"
+ traceback.format_exc())
if not use3dview:
from printrun import gviz
root.gwindow = gviz.GvizWindow(build_dimensions = root.build_dimensions_list,
grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width = root.settings.preview_extrusion_width,
bgcolor = root.bgcolor)
root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
if not isinstance(root.gviz, NoViz):
self.Add(root.gviz.widget, 1, flag = wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)
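# Illustrative sketch only: NoViz and NoVizWindow act as "null object" stand-ins so
# callers can use root.gviz / root.gwindow without checking whether a real
# visualization backend was created, and the constructor above degrades from the 3D
# viewer to the 2D one when its import or initialization fails. The guarded-import
# idiom in isolation, with a hypothetical helper name:
def _example_pick_viz_module():
    try:
        import printrun.gcview as viz_module  # preferred 3D backend, needs OpenGL
    except Exception:
        from printrun import gviz as viz_module  # 2D fallback shipped with printrun
    return viz_module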
|
normal
|
{
"blob_id": "3cc473f6bb4b2e1dd806edb8b096a6118fe7056a",
"index": 7202,
"step-1": "<mask token>\n\n\nclass NoViz:\n <mask token>\n <mask token>\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n <mask token>\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-2": "<mask token>\n\n\nclass NoViz:\n <mask token>\n <mask token>\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-3": "<mask token>\n\n\nclass NoViz:\n <mask token>\n\n def clear(self, *a):\n pass\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-4": "import traceback\nimport logging\nimport wx\n\n\nclass NoViz:\n showall = False\n\n def clear(self, *a):\n pass\n\n def addfile_perlayer(self, gcode, showall=False):\n layer_idx = 0\n while layer_idx < len(gcode.all_layers):\n yield layer_idx\n layer_idx += 1\n yield None\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n\n def Refresh(self, *a):\n pass\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-5": "# This file is part of the printrun suite.\n#\n# printrun is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# printrun is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with printrun. If not, see <http://www.gnu.org/licenses/>.\n\nimport traceback\nimport logging\n\nimport wx\n\nclass NoViz:\n\n showall = False\n\n def clear(self, *a):\n pass\n\n def addfile_perlayer(self, gcode, showall = False):\n layer_idx = 0\n while layer_idx < len(gcode.all_layers):\n yield layer_idx\n layer_idx += 1\n yield None\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n\n def Refresh(self, *a):\n pass\n\n def setlayer(self, *a):\n pass\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel = None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel: parentpanel = root.panel\n if root.settings.mainviz == \"None\":\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == \"2D\"\n if root.settings.mainviz == \"3D\":\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel, root.build_dimensions_list, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\"3D view mode requested, but we failed to initialize it.\\n\"\n + \"Falling back to 2D view, and here is the backtrace:\\n\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300),\n build_dimensions = root.build_dimensions_list,\n grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width = root.settings.preview_extrusion_width,\n bgcolor = root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\"Click to examine / edit\\n layers of loaded file\")))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.ID_ANY, 'Gcode view, shift to move view, mousewheel to set layer', size = (600, 600), build_dimensions = root.build_dimensions_list, objects = objects, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\"3D view mode requested, but we failed to initialize it.\\n\"\n + \"Falling back to 2D view, and here is the backtrace:\\n\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions = root.build_dimensions_list,\n grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width = 
root.settings.preview_extrusion_width,\n bgcolor = root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag = wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)\n",
"step-ids": [
9,
10,
11,
15,
16
]
}
|
[
9,
10,
11,
15,
16
] |
import os
import h5py
import numpy as np
import torch
from datasets.hdf5 import get_test_datasets
from unet3d import utils
from unet3d.config import load_config
from unet3d.model import get_model
logger = utils.get_logger('UNet3DPredictor')
def predict(model, hdf5_dataset, config):
"""
Return prediction masks by applying the model on the given dataset
Args:
model (Unet3D): trained 3D UNet model used for prediction
hdf5_dataset (torch.utils.data.Dataset): input dataset
out_channels (int): number of channels in the network output
device (torch.Device): device to run the prediction on
Returns:
prediction_maps (numpy array): prediction masks for given dataset
"""
def _volume_shape(hdf5_dataset):
# TODO: support multiple internal datasets
raw = hdf5_dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
out_channels = config['model'].get('out_channels')
if out_channels is None:
out_channels = config['model']['dt_out_channels']
prediction_channel = config.get('prediction_channel', None)
if prediction_channel is not None:
logger.info(f"Using only channel '{prediction_channel}' from the network output")
device = config['device']
output_heads = config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')
    # dimensionality of the output (CxDxHxW)
volume_shape = _volume_shape(hdf5_dataset)
if prediction_channel is None:
prediction_maps_shape = (out_channels,) + volume_shape
else:
# single channel prediction map
prediction_maps_shape = (1,) + volume_shape
logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
# initialize the output prediction arrays
prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]
# initialize normalization mask in order to average out probabilities of overlapping patches
normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]
# Sets the module in evaluation mode explicitly, otherwise the final Softmax/Sigmoid won't be applied!
model.eval()
# Run predictions on the entire input dataset
with torch.no_grad():
for patch, index in hdf5_dataset:
logger.info(f'Predicting slice:{index}')
# save patch index: (C,D,H,W)
if prediction_channel is None:
channel_slice = slice(0, out_channels)
else:
channel_slice = slice(0, 1)
            index = index[1:] #dk: support input image with multiple dims
index = (channel_slice,) + index
# convert patch to torch tensor NxCxDxHxW and send to device we're using batch size of 1
patch = patch.unsqueeze(dim=0).to(device)
# forward pass
predictions = model(patch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
predictions = [predictions]
for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
normalization_masks):
# squeeze batch dimension and convert back to numpy array
prediction = prediction.squeeze(dim=0).cpu().numpy()
if prediction_channel is not None:
# use only the 'prediction_channel'
logger.info(f"Using channel '{prediction_channel}'...")
prediction = np.expand_dims(prediction[prediction_channel], axis=0)
# unpad in order to avoid block artifacts in the output probability maps
u_prediction, u_index = utils.unpad(prediction, index, volume_shape)
# accumulate probabilities into the output prediction array
prediction_map[u_index] += u_prediction
# count voxel visits for normalization
normalization_mask[u_index] += 1
return [prediction_map / normalization_mask for prediction_map, normalization_mask in
zip(prediction_maps, normalization_masks)]
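# Illustrative sketch only: the loop above averages probabilities where patches
# overlap by summing predictions into prediction_map and counting voxel visits in
# normalization_mask, then dividing. A toy 1-D version of the same
# accumulate-and-normalize idea, with made-up patch slices and values:
def _example_overlap_average():
    volume = np.zeros(6, dtype='float32')  # accumulated probabilities
    counts = np.zeros(6, dtype='float32')  # voxel visit counts
    for sl, patch in [(slice(0, 4), np.ones(4)), (slice(2, 6), np.full(4, 3.0))]:
        volume[sl] += patch
        counts[sl] += 1
    # overlapping voxels (indices 2 and 3) end up as the mean of 1.0 and 3.0 -> 2.0
    return volume / counts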
def save_predictions(prediction_maps, output_file, dataset_names):
"""
Saving probability maps to a given output H5 file. If 'average_channels'
    is set to True average the probability_maps across the channel axis
(useful in case where each channel predicts semantically the same thing).
Args:
prediction_maps (list): list of numpy array containing prediction maps in separate channels
output_file (string): path to the output H5 file
dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved
"""
assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'
logger.info(f'Saving predictions to: {output_file}...')
with h5py.File(output_file, "w") as output_h5:
for prediction_map, dataset_name in zip(prediction_maps, dataset_names):
logger.info(f"Creating dataset '{dataset_name}'...")
output_h5.create_dataset(dataset_name, data=prediction_map, compression="gzip")
def _get_output_file(dataset, suffix='_predictions'):
return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'
def _get_dataset_names(config, number_of_datasets):
dataset_names = config.get('dest_dataset_name')
print(dataset_names)
if dataset_names is not None:
if isinstance(dataset_names, str):
return [dataset_names]
else:
return dataset_names
else:
default_prefix = 'predictions'
if number_of_datasets == 1:
return [default_prefix]
else:
return [f'{default_prefix}{i}' for i in range(number_of_datasets)]
def main():
# Load configuration
config = load_config()
print(config)
# Create the model
model = get_model(config)
# Load model state
model_path = config['model_path']
logger.info(f'Loading model from {model_path}...')
utils.load_checkpoint(model_path, model)
logger.info(f"Sending the model to '{config['device']}'")
model = model.to(config['device'])
logger.info('Loading HDF5 datasets...')
for test_dataset in get_test_datasets(config):
logger.info(f"Processing '{test_dataset.file_path}'...")
# run the model prediction on the entire dataset
predictions = predict(model, test_dataset, config)
# save the resulting probability maps
output_file = _get_output_file(test_dataset)
dataset_names = _get_dataset_names(config, len(predictions))
save_predictions(predictions, output_file, dataset_names)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "6fba773025268d724283e510a03d0592282adb0a",
"index": 1780,
"step-1": "<mask token>\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names\n ), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n with h5py.File(output_file, 'w') as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names\n ):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map,\n compression='gzip')\n\n\n<mask token>\n\n\ndef main():\n config = load_config()\n print(config)\n model = get_model(config)\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n predictions = predict(model, test_dataset, config)\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef predict(model, hdf5_dataset, config):\n \"\"\"\n Return prediction masks by applying the model on the given dataset\n\n Args:\n model (Unet3D): trained 3D UNet model used for prediction\n hdf5_dataset (torch.utils.data.Dataset): input dataset\n out_channels (int): number of channels in the network output\n device (torch.Device): device to run the prediction on\n\n Returns:\n prediction_maps (numpy array): prediction masks for given dataset\n \"\"\"\n\n def _volume_shape(hdf5_dataset):\n raw = hdf5_dataset.raws[0]\n if raw.ndim == 3:\n return raw.shape\n else:\n return raw.shape[1:]\n out_channels = config['model'].get('out_channels')\n if out_channels is None:\n out_channels = config['model']['dt_out_channels']\n prediction_channel = config.get('prediction_channel', None)\n if prediction_channel is not None:\n logger.info(\n f\"Using only channel '{prediction_channel}' from the network output\"\n )\n device = config['device']\n output_heads = config['model'].get('output_heads', 1)\n logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')\n volume_shape = _volume_shape(hdf5_dataset)\n if prediction_channel is None:\n prediction_maps_shape = (out_channels,) + volume_shape\n else:\n prediction_maps_shape = (1,) + volume_shape\n logger.info(\n f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}'\n )\n prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n model.eval()\n with torch.no_grad():\n for patch, index in hdf5_dataset:\n logger.info(f'Predicting slice:{index}')\n if prediction_channel is None:\n channel_slice = slice(0, out_channels)\n else:\n channel_slice = slice(0, 1)\n index = index[1:]\n index = (channel_slice,) + index\n patch = patch.unsqueeze(dim=0).to(device)\n predictions = model(patch)\n if output_heads == 1:\n predictions = [predictions]\n for prediction, prediction_map, normalization_mask in zip(\n predictions, prediction_maps, normalization_masks):\n prediction = prediction.squeeze(dim=0).cpu().numpy()\n if prediction_channel is not None:\n logger.info(f\"Using channel '{prediction_channel}'...\")\n prediction = np.expand_dims(prediction[\n prediction_channel], axis=0)\n u_prediction, u_index = utils.unpad(prediction, index,\n volume_shape)\n prediction_map[u_index] += u_prediction\n normalization_mask[u_index] += 1\n return [(prediction_map / normalization_mask) for prediction_map,\n normalization_mask in zip(prediction_maps, normalization_masks)]\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. 
If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names\n ), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n with h5py.File(output_file, 'w') as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names\n ):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map,\n compression='gzip')\n\n\ndef _get_output_file(dataset, suffix='_predictions'):\n return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'\n\n\ndef _get_dataset_names(config, number_of_datasets):\n dataset_names = config.get('dest_dataset_name')\n print(dataset_names)\n if dataset_names is not None:\n if isinstance(dataset_names, str):\n return [dataset_names]\n else:\n return dataset_names\n else:\n default_prefix = 'predictions'\n if number_of_datasets == 1:\n return [default_prefix]\n else:\n return [f'{default_prefix}{i}' for i in range(number_of_datasets)]\n\n\ndef main():\n config = load_config()\n print(config)\n model = get_model(config)\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n predictions = predict(model, test_dataset, config)\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nlogger = utils.get_logger('UNet3DPredictor')\n\n\ndef predict(model, hdf5_dataset, config):\n \"\"\"\n Return prediction masks by applying the model on the given dataset\n\n Args:\n model (Unet3D): trained 3D UNet model used for prediction\n hdf5_dataset (torch.utils.data.Dataset): input dataset\n out_channels (int): number of channels in the network output\n device (torch.Device): device to run the prediction on\n\n Returns:\n prediction_maps (numpy array): prediction masks for given dataset\n \"\"\"\n\n def _volume_shape(hdf5_dataset):\n raw = hdf5_dataset.raws[0]\n if raw.ndim == 3:\n return raw.shape\n else:\n return raw.shape[1:]\n out_channels = config['model'].get('out_channels')\n if out_channels is None:\n out_channels = config['model']['dt_out_channels']\n prediction_channel = config.get('prediction_channel', None)\n if prediction_channel is not None:\n logger.info(\n f\"Using only channel '{prediction_channel}' from the network output\"\n )\n device = config['device']\n output_heads = config['model'].get('output_heads', 1)\n logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')\n volume_shape = _volume_shape(hdf5_dataset)\n if prediction_channel is None:\n prediction_maps_shape = (out_channels,) + volume_shape\n else:\n prediction_maps_shape = (1,) + volume_shape\n logger.info(\n f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}'\n )\n prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n model.eval()\n with torch.no_grad():\n for patch, index in hdf5_dataset:\n logger.info(f'Predicting slice:{index}')\n if prediction_channel is None:\n channel_slice = slice(0, out_channels)\n else:\n channel_slice = slice(0, 1)\n index = index[1:]\n index = (channel_slice,) + index\n patch = patch.unsqueeze(dim=0).to(device)\n predictions = model(patch)\n if output_heads == 1:\n predictions = [predictions]\n for prediction, prediction_map, normalization_mask in zip(\n predictions, prediction_maps, normalization_masks):\n prediction = prediction.squeeze(dim=0).cpu().numpy()\n if prediction_channel is not None:\n logger.info(f\"Using channel '{prediction_channel}'...\")\n prediction = np.expand_dims(prediction[\n prediction_channel], axis=0)\n u_prediction, u_index = utils.unpad(prediction, index,\n volume_shape)\n prediction_map[u_index] += u_prediction\n normalization_mask[u_index] += 1\n return [(prediction_map / normalization_mask) for prediction_map,\n normalization_mask in zip(prediction_maps, normalization_masks)]\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. 
If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names\n ), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n with h5py.File(output_file, 'w') as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names\n ):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map,\n compression='gzip')\n\n\ndef _get_output_file(dataset, suffix='_predictions'):\n return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'\n\n\ndef _get_dataset_names(config, number_of_datasets):\n dataset_names = config.get('dest_dataset_name')\n print(dataset_names)\n if dataset_names is not None:\n if isinstance(dataset_names, str):\n return [dataset_names]\n else:\n return dataset_names\n else:\n default_prefix = 'predictions'\n if number_of_datasets == 1:\n return [default_prefix]\n else:\n return [f'{default_prefix}{i}' for i in range(number_of_datasets)]\n\n\ndef main():\n config = load_config()\n print(config)\n model = get_model(config)\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n predictions = predict(model, test_dataset, config)\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport h5py\nimport numpy as np\nimport torch\nfrom datasets.hdf5 import get_test_datasets\nfrom unet3d import utils\nfrom unet3d.config import load_config\nfrom unet3d.model import get_model\nlogger = utils.get_logger('UNet3DPredictor')\n\n\ndef predict(model, hdf5_dataset, config):\n \"\"\"\n Return prediction masks by applying the model on the given dataset\n\n Args:\n model (Unet3D): trained 3D UNet model used for prediction\n hdf5_dataset (torch.utils.data.Dataset): input dataset\n out_channels (int): number of channels in the network output\n device (torch.Device): device to run the prediction on\n\n Returns:\n prediction_maps (numpy array): prediction masks for given dataset\n \"\"\"\n\n def _volume_shape(hdf5_dataset):\n raw = hdf5_dataset.raws[0]\n if raw.ndim == 3:\n return raw.shape\n else:\n return raw.shape[1:]\n out_channels = config['model'].get('out_channels')\n if out_channels is None:\n out_channels = config['model']['dt_out_channels']\n prediction_channel = config.get('prediction_channel', None)\n if prediction_channel is not None:\n logger.info(\n f\"Using only channel '{prediction_channel}' from the network output\"\n )\n device = config['device']\n output_heads = config['model'].get('output_heads', 1)\n logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')\n volume_shape = _volume_shape(hdf5_dataset)\n if prediction_channel is None:\n prediction_maps_shape = (out_channels,) + volume_shape\n else:\n prediction_maps_shape = (1,) + volume_shape\n logger.info(\n f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}'\n )\n prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for\n _ in range(output_heads)]\n model.eval()\n with torch.no_grad():\n for patch, index in hdf5_dataset:\n logger.info(f'Predicting slice:{index}')\n if prediction_channel is None:\n channel_slice = slice(0, out_channels)\n else:\n channel_slice = slice(0, 1)\n index = index[1:]\n index = (channel_slice,) + index\n patch = patch.unsqueeze(dim=0).to(device)\n predictions = model(patch)\n if output_heads == 1:\n predictions = [predictions]\n for prediction, prediction_map, normalization_mask in zip(\n predictions, prediction_maps, normalization_masks):\n prediction = prediction.squeeze(dim=0).cpu().numpy()\n if prediction_channel is not None:\n logger.info(f\"Using channel '{prediction_channel}'...\")\n prediction = np.expand_dims(prediction[\n prediction_channel], axis=0)\n u_prediction, u_index = utils.unpad(prediction, index,\n volume_shape)\n prediction_map[u_index] += u_prediction\n normalization_mask[u_index] += 1\n return [(prediction_map / normalization_mask) for prediction_map,\n normalization_mask in zip(prediction_maps, normalization_masks)]\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. 
If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names\n ), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n with h5py.File(output_file, 'w') as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names\n ):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map,\n compression='gzip')\n\n\ndef _get_output_file(dataset, suffix='_predictions'):\n return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'\n\n\ndef _get_dataset_names(config, number_of_datasets):\n dataset_names = config.get('dest_dataset_name')\n print(dataset_names)\n if dataset_names is not None:\n if isinstance(dataset_names, str):\n return [dataset_names]\n else:\n return dataset_names\n else:\n default_prefix = 'predictions'\n if number_of_datasets == 1:\n return [default_prefix]\n else:\n return [f'{default_prefix}{i}' for i in range(number_of_datasets)]\n\n\ndef main():\n config = load_config()\n print(config)\n model = get_model(config)\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n predictions = predict(model, test_dataset, config)\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os\n\nimport h5py\nimport numpy as np\nimport torch\n\nfrom datasets.hdf5 import get_test_datasets\nfrom unet3d import utils\nfrom unet3d.config import load_config\nfrom unet3d.model import get_model\n\nlogger = utils.get_logger('UNet3DPredictor')\n\n\ndef predict(model, hdf5_dataset, config):\n \"\"\"\n Return prediction masks by applying the model on the given dataset\n\n Args:\n model (Unet3D): trained 3D UNet model used for prediction\n hdf5_dataset (torch.utils.data.Dataset): input dataset\n out_channels (int): number of channels in the network output\n device (torch.Device): device to run the prediction on\n\n Returns:\n prediction_maps (numpy array): prediction masks for given dataset\n \"\"\"\n\n def _volume_shape(hdf5_dataset):\n # TODO: support multiple internal datasets\n raw = hdf5_dataset.raws[0]\n if raw.ndim == 3:\n return raw.shape\n else:\n return raw.shape[1:]\n\n out_channels = config['model'].get('out_channels')\n if out_channels is None:\n out_channels = config['model']['dt_out_channels']\n\n prediction_channel = config.get('prediction_channel', None)\n if prediction_channel is not None:\n logger.info(f\"Using only channel '{prediction_channel}' from the network output\")\n\n device = config['device']\n output_heads = config['model'].get('output_heads', 1)\n\n logger.info(f'Running prediction on {len(hdf5_dataset)} patches...')\n # dimensionality of the the output (CxDxHxW)\n volume_shape = _volume_shape(hdf5_dataset)\n if prediction_channel is None:\n prediction_maps_shape = (out_channels,) + volume_shape\n else:\n # single channel prediction map\n prediction_maps_shape = (1,) + volume_shape\n\n logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')\n\n # initialize the output prediction arrays\n prediction_maps = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]\n # initialize normalization mask in order to average out probabilities of overlapping patches\n normalization_masks = [np.zeros(prediction_maps_shape, dtype='float32') for _ in range(output_heads)]\n\n # Sets the module in evaluation mode explicitly, otherwise the final Softmax/Sigmoid won't be applied!\n model.eval()\n # Run predictions on the entire input dataset\n with torch.no_grad():\n for patch, index in hdf5_dataset:\n logger.info(f'Predicting slice:{index}')\n\n # save patch index: (C,D,H,W)\n if prediction_channel is None:\n channel_slice = slice(0, out_channels)\n else:\n channel_slice = slice(0, 1)\n\n index = index[1:] #dk: support input image with mulitple dims\n index = (channel_slice,) + index\n\n # convert patch to torch tensor NxCxDxHxW and send to device we're using batch size of 1\n patch = patch.unsqueeze(dim=0).to(device)\n\n # forward pass\n predictions = model(patch)\n # wrap predictions into a list if there is only one output head from the network\n if output_heads == 1:\n predictions = [predictions]\n\n for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,\n normalization_masks):\n # squeeze batch dimension and convert back to numpy array\n prediction = prediction.squeeze(dim=0).cpu().numpy()\n if prediction_channel is not None:\n # use only the 'prediction_channel'\n logger.info(f\"Using channel '{prediction_channel}'...\")\n prediction = np.expand_dims(prediction[prediction_channel], axis=0)\n\n # unpad in order to avoid block artifacts in the output probability maps\n u_prediction, u_index = utils.unpad(prediction, index, volume_shape)\n # accumulate probabilities 
into the output prediction array\n prediction_map[u_index] += u_prediction\n # count voxel visits for normalization\n normalization_mask[u_index] += 1\n\n return [prediction_map / normalization_mask for prediction_map, normalization_mask in\n zip(prediction_maps, normalization_masks)]\n\n\ndef save_predictions(prediction_maps, output_file, dataset_names):\n \"\"\"\n Saving probability maps to a given output H5 file. If 'average_channels'\n is set to True average the probability_maps across the the channel axis\n (useful in case where each channel predicts semantically the same thing).\n\n Args:\n prediction_maps (list): list of numpy array containing prediction maps in separate channels\n output_file (string): path to the output H5 file\n dataset_names (list): list of dataset names inside H5 file where the prediction maps will be saved\n \"\"\"\n assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n\n with h5py.File(output_file, \"w\") as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map, compression=\"gzip\")\n\n\ndef _get_output_file(dataset, suffix='_predictions'):\n return f'{os.path.splitext(dataset.file_path)[0]}{suffix}.h5'\n\n\ndef _get_dataset_names(config, number_of_datasets):\n dataset_names = config.get('dest_dataset_name')\n print(dataset_names)\n if dataset_names is not None:\n if isinstance(dataset_names, str):\n return [dataset_names]\n else:\n return dataset_names\n else:\n default_prefix = 'predictions'\n if number_of_datasets == 1:\n return [default_prefix]\n else:\n return [f'{default_prefix}{i}' for i in range(number_of_datasets)]\n\n\ndef main():\n # Load configuration\n config = load_config()\n print(config)\n\n # Create the model\n model = get_model(config)\n\n # Load model state\n model_path = config['model_path']\n logger.info(f'Loading model from {model_path}...')\n utils.load_checkpoint(model_path, model)\n logger.info(f\"Sending the model to '{config['device']}'\")\n model = model.to(config['device'])\n\n logger.info('Loading HDF5 datasets...')\n for test_dataset in get_test_datasets(config):\n logger.info(f\"Processing '{test_dataset.file_path}'...\")\n # run the model prediction on the entire dataset\n predictions = predict(model, test_dataset, config)\n # save the resulting probability maps\n output_file = _get_output_file(test_dataset)\n dataset_names = _get_dataset_names(config, len(predictions))\n save_predictions(predictions, output_file, dataset_names)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
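The UNet3D predictor embedded in the record above averages overlapping patch predictions by accumulating probabilities and per-voxel visit counts, then dividing at the end. A minimal NumPy sketch of that accumulate-and-normalize step (the toy shapes and slices are illustrative, not the script's real patch indices):

import numpy as np

prediction_map = np.zeros((1, 8, 8, 8), dtype='float32')      # CxDxHxW accumulator
normalization_mask = np.zeros((1, 8, 8, 8), dtype='float32')  # per-voxel visit counts

patches = [
    ((slice(0, 1), slice(0, 8), slice(0, 8), slice(0, 6)), np.full((1, 8, 8, 6), 0.2, dtype='float32')),
    ((slice(0, 1), slice(0, 8), slice(0, 8), slice(2, 8)), np.full((1, 8, 8, 6), 0.6, dtype='float32')),
]

for index, prediction in patches:
    prediction_map[index] += prediction     # accumulate probabilities
    normalization_mask[index] += 1          # count how many patches touched each voxel

averaged = prediction_map / normalization_mask   # overlapping voxels get the mean (0.4 here)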
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##########
# websocket-client
# https://pypi.python.org/pypi/websocket-client/
# sudo -H pip install websocket-client
#####
from websocket import create_connection
ws = create_connection( "ws://192.168.1.132:81/python" )
msg = '#0000FF'
print "Envoi d’un message à l’ESP"
print( msg )
ws.send( msg )
print "Fin de l’envoi\n"
print "Réception..."
result = ws.recv()
print "Reçu : '%s'" % result
ws.close()
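
# A sketch (not in the original script) wrapping the same round trip in a
# reusable helper, hedged with a timeout so an unreachable ESP fails fast;
# the helper name and the 5-second value are assumptions.
def send_colour(colour, host="ws://192.168.1.132:81/python", timeout=5):
    ws = create_connection(host, timeout=timeout)
    try:
        ws.send(colour)
        return ws.recv()  # the ESP's acknowledgement
    finally:
        ws.close()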
|
normal
|
{
"blob_id": "3b26181097025add5919e752aa53e57eea49c943",
"index": 4923,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n##########\n# websocket-client\n# https://pypi.python.org/pypi/websocket-client/\n# sudo -H pip install websocket-client\n#####\n\nfrom websocket import create_connection\nws = create_connection( \"ws://192.168.1.132:81/python\" )\n\nmsg = '#0000FF'\nprint \"Envoi d’un message à l’ESP\"\nprint( msg )\nws.send( msg )\nprint \"Fin de l’envoi\\n\"\n\nprint \"Réception...\"\nresult = ws.recv()\nprint \"Reçu : '%s'\" % result\nws.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
WOO_HOST = os.environ.get('WOO_HOST')
#WooCommerce key credentials
WOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')
WOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')
#XML feed fields and settings
XML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')
XML_SITE_NAME = os.environ.get('XML_SITE_NAME')
XML_SITE_HOST = os.environ.get('XML_SITE_HOST')
XML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION', 'Feed XML autogenerated')
XML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')
PRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')
CRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')
REDIS_HOST = os.environ.get('REDIS_HOST', 'redis')
SENTRY_URL = os.environ.get('SENTRY_URL')
try:
from local_settings import *
except ImportError:
pass
if SENTRY_URL:
import sentry_sdk
sentry_sdk.init(SENTRY_URL)
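
# A sketch (not part of this settings module) of how the credentials above could
# feed the official `woocommerce` REST client used by the feed generator; the
# package choice, the API version string and the helper name are assumptions.
def get_wc_client():
    from woocommerce import API  # imported lazily so the settings module stays dependency-free
    return API(
        url=WOO_HOST,
        consumer_key=WOO_CONSUMER_KEY,
        consumer_secret=WOO_CONSUMER_SECRET,
        version='wc/v3',
    )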
|
normal
|
{
"blob_id": "386fa51b9b285d36c75d6446f9348f6713e0dbaa",
"index": 2794,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from local_settings import *\nexcept ImportError:\n pass\nif SENTRY_URL:\n import sentry_sdk\n sentry_sdk.init(SENTRY_URL)\n",
"step-3": "<mask token>\nWOO_HOST = os.environ.get('WOO_HOST')\nWOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')\nWOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')\nXML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')\nXML_SITE_NAME = os.environ.get('XML_SITE_NAME')\nXML_SITE_HOST = os.environ.get('XML_SITE_HOST')\nXML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION',\n 'Feed XML autogenerated')\nXML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')\nPRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')\nCRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')\nREDIS_HOST = os.environ.get('REDIS_HOST', 'redis')\nSENTRY_URL = os.environ.get('SENTRY_URL')\ntry:\n from local_settings import *\nexcept ImportError:\n pass\nif SENTRY_URL:\n import sentry_sdk\n sentry_sdk.init(SENTRY_URL)\n",
"step-4": "import os\nWOO_HOST = os.environ.get('WOO_HOST')\nWOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')\nWOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')\nXML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')\nXML_SITE_NAME = os.environ.get('XML_SITE_NAME')\nXML_SITE_HOST = os.environ.get('XML_SITE_HOST')\nXML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION',\n 'Feed XML autogenerated')\nXML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')\nPRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')\nCRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')\nREDIS_HOST = os.environ.get('REDIS_HOST', 'redis')\nSENTRY_URL = os.environ.get('SENTRY_URL')\ntry:\n from local_settings import *\nexcept ImportError:\n pass\nif SENTRY_URL:\n import sentry_sdk\n sentry_sdk.init(SENTRY_URL)\n",
"step-5": "import os\n\nWOO_HOST = os.environ.get('WOO_HOST')\n\n#WooCommerce key credentials\nWOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')\nWOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')\n\n#XML feed fields and settings\nXML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')\nXML_SITE_NAME = os.environ.get('XML_SITE_NAME')\nXML_SITE_HOST = os.environ.get('XML_SITE_HOST')\nXML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION', 'Feed XML autogenerated')\nXML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')\n\nPRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')\n\nCRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')\n\nREDIS_HOST = os.environ.get('REDIS_HOST', 'redis')\n\nSENTRY_URL = os.environ.get('SENTRY_URL')\n\ntry:\n from local_settings import *\nexcept ImportError:\n pass\n\nif SENTRY_URL:\n import sentry_sdk\n sentry_sdk.init(SENTRY_URL)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os

import numpy as np
import pandas as pd
import tushare as ts
from unrar import rarfile

year_month='201911'
contract_kind='NI'
rar_data_file_path='C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/'
main_code_path='C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/main/main_/'
clean_data_path='D:/1_min补充统一/'
end_date='20200107'
time_range_path='D:/统一所有品种时间范围.csv'
# save_month_fill_data_path='D:/1_min补充统一/'+contract_kind+'主力连续'+'_'+month+'.csv'
def renew_commodity_future(year_month:str,contract_kind:str,main_code_path:str,rar_data_file_path:str,clean_data_path:str,time_range_path:str,end_date:str,commodity_bool=True):
    '''
    Monthly update routine for commodity futures minute data.
    year_month: year and month as a string, e.g. '201911'; it is the suffix of FutAC_Min1_Std_, as in FutAC_Min1_Std_201911
    contract_kind: the contract variety code to process, e.g. 'A' or 'NI' (a single string, as in the type hint)
    main_code_path: directory holding the dominant (main) contract files
    rar_data_file_path: directory holding the rar archives such as FutAC_Min1_Std_201911.rar, excluding the file name
    clean_data_path: directory holding the cleaned minute data; newly processed data is appended to the matching variety there
    time_range_path: path to the trading-time-range file, including the file name, e.g. D:/统一所有品种时间范围.csv
    end_date: today's date, e.g. '20200103', used to request the tushare trading calendar; reading and merging are driven by that calendar
    commodity_bool: True for commodity futures, False for financial futures; defaults to commodity futures
    '''
month=year_month
if commodity_bool:
file_name=rar_data_file_path+'FutAC_Min1_Std_'+month+'.rar'
else:
file_name=rar_data_file_path+'FutSF_Min1_Std_'+month+'.rar'
orignial_path=main_code_path
specifi_path=orignial_path+contract_kind+'_1day_main.npy'
rar = rarfile.RarFile(file_name,pwd='www.jinshuyuan.net')
# 原始的处理好的数据
orignal_clean_csv_path=clean_data_path
pwd='www.jinshuyuan.net'
data=np.load(specifi_path)
time_0931_15=pd.read_csv(time_range_path)['date'].values.tolist()
rar.extractall(path=file_name.split('.')[0])
    # end_date must be supplied first so the extracted time span lines up with the main-contract series
    # positions are then located by month
pro = ts.pro_api('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')
ts.set_token('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')
date_df=pro.trade_cal(exchange='DCE', start_date='20100101', end_date=end_date)
date_df=date_df.loc[date_df['is_open']==1]
date_list=date_df['cal_date'].tolist()
# ==========================================================================
# 针对的是201911月数据,对应的合约index 放在 target_date_index中
date_df=pd.DataFrame({'date':date_list})
date_df['month']=date_df['date'].str[:6]
target_date=date_df.loc[date_df['month']==month]
target_date_index=target_date.index.values
target_date=target_date['date'].values
# 获取对应目标
data=data.reshape(-1)
contract_main_pool=data[target_date_index]
# 去掉交易所的代码编号
contract_main_pool=(pd.Series(contract_main_pool).str.split('.').str[0]+'.csv').values
file_pools=os.listdir(file_name.split('.')[0])
    # Zhengzhou exchange codes are upper case while the other exchanges use lower case, so check both forms
if contract_main_pool[0] not in file_pools:
contract_main_pool=[contract_file.lower() for contract_file in contract_main_pool]
if contract_main_pool[0] not in file_pools:
print(f'找不到{contract_main_pool[0]}')
# 读取好所有的路径
contract_main_pool=(file_name.split('.')[0]+'/'+pd.Series(contract_main_pool)).values
# (len(target_date),contract_main_pool.shape[0])
row_1=['市场代码','合约代码', '时间', '开','高', '低', '收', '成交量', '成交额', '持仓量']
orignal_data=[]
orignal_data.append(row_1)
for index in range(len(target_date)):
date=target_date[index]
one_file_path=contract_main_pool[index]
df=pd.read_csv(one_file_path,encoding='gbk')
df['date']=df['时间'].str[:10]
df['date2']=df['date'].str.replace('-','')
result=df.loc[df['date2']==date]
if result.shape[0]>0:
for row_index in range(len(result)):
target_row=result.iloc[row_index].tolist()
clean_row=target_row[:-2]
orignal_data.append(clean_row)
print(f'{contract_kind} {date} finished!')
else:
print(f'没找到合约品种{contract_kind}在{date}')
print(f'{contract_kind}在{month}月的主力合约数据读取完成')
final_df=pd.DataFrame(orignal_data[1:],columns=orignal_data[0])
final_df['date']=final_df['时间'].str[:10]
final_df_date=final_df['date'].unique()
final_df['date']=final_df['时间'].str[:10]
final_df['time']=final_df['时间'].str[10:].str.strip()
final_df['时间']=final_df['date']+' '+final_df['time']
final_df=final_df.sort_values('时间')
final_df['合约代码']=final_df['合约代码'].str.upper()
final_df=final_df.sort_values('时间')
# ===============================增加了从constant_time进行截取================================
final_df['transf_date']=pd.to_datetime(final_df['date'])
final_df.set_index('transf_date',inplace=True)
combine_all_df=pd.DataFrame()
final_df['date2']=final_df['date'].str.replace('-','')
# 按月进行填充
# 设置了存放按月填充的路径
for date_index in range(len(target_date)):
#按日期进行分割
target_df=final_df.loc[final_df['date2']==target_date[date_index]]
#分割到的长度放入容器中
target_num=len(target_df)
#理论长度
theory_num=len(time_0931_15)
        # three cases in practice: 1. a trading day with no data at all 2. a trading day with only partial data 3. a trading day with complete data
if target_num>0:
#开始区分2,3情况
have_time=target_df['time'].values.tolist()
lack_time=[x for x in time_0931_15 if x not in have_time]
#检查是不是情况2
if lack_time:
print(f'{target_date[date_index]} 不连续')
            # 12 columns in total: fill everything with NaN first, then write in the known values at the end
insert_array=np.empty(shape=(len(lack_time),12))
insert_array.fill(np.nan)
insert_df=pd.DataFrame(insert_array,columns=['市场代码','合约代码','时间','开','高','低','收','成交量','成交额','持仓量','date','time'])
insert_df['date']=target_date[date_index]
insert_df['time']=lack_time
            # fewer missing timestamps than len(time_0931_15) means the day is only partially empty, so the contract code must be filled in
if len(lack_time)<len(time_0931_15):
insert_df['合约代码']=target_df['合约代码'].unique()[-1]
#生成一天完整的数据
combine_insert_df=pd.concat([target_df,insert_df])
#将数据添加到容器中
combine_all_df=pd.concat([combine_all_df,combine_insert_df])
#完全没有数据,直接填充
else:
print(f'{target_date[date_index]}empty ')
lack_time=[x for x in time_0931_15]
            # 12 columns in total: fill everything with NaN first, then write in the known values at the end
insert_array=np.empty(shape=(len(lack_time),12))
insert_array.fill(np.nan)
insert_df=pd.DataFrame(insert_array,columns=['市场代码','合约代码','时间','开','高','低','收','成交量','成交额','持仓量','date','time'])
insert_df['date']=target_date[date_index]
insert_df['time']=lack_time
#将数据添加到容器
combine_all_df=pd.concat([combine_all_df,insert_df])
combine_all_df['时间']=combine_all_df['date']+' '+combine_all_df['time']
#调整时间
combine_all_df=combine_all_df.sort_values('时间')
combine_all_df.reset_index(inplace=True)
#数据输出,按设定的顺序
combine_all_df=combine_all_df[['市场代码', '合约代码', '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量','date','time']]
combine_all_df['时间']=combine_all_df['时间'].str.replace('-','')
combine_all_df['date']=combine_all_df['date'].str.replace('-','')
# combine_all_df.to_csv(save_month_fill_data_path,index=False,encoding='utf-8-sig')
# ==========================储存数据=================================================
combine_df=combine_all_df.copy()
contract_type=contract_kind
combine_df=combine_df.sort_values('时间')
# ====================================================================开始截取============================================================
    # end_time+1 can serve as the start of each slice (the row after one day's end is the next day's start, with 0 prepended);
    # the stop position is also end_time+1, since slicing stops just before it and therefore still includes end_time
    # slice uniformly at 15:15 in the afternoon
end_time='15:15:00'
end_index=np.where(combine_df['time']==end_time)[0]+1
end_index=np.hstack(([0],end_index))
start=end_index[:-1]
end=end_index[1:]
    # ================================================================ the night session before the first trading day is missing ==========================================
    # build a synthetic set of timestamps here to stand in for the missing night-session rows
    # with the slicing above, the first trading day lacks the previous day's night session
last_day=date_df['date'].iloc[target_date_index[0]-1]
last_day=last_day[:4]+'-'+last_day[4:6]+'-'+last_day[6:]
first_day_have=combine_df[start[0]:end[0]]['time'].values
full_time=combine_df['time'].unique()
full_time.sort()
first_day_lack=[x for x in full_time[-179:]]
first_day_lack.sort()
lack_array=np.empty(shape=(len(first_day_lack),12))
lack_array.fill(np.nan)
# ===============================准备缺失部分df==========================================================================================
first_day_lack_df=pd.DataFrame(lack_array,columns=combine_df.columns)
first_day_lack_df['time']=first_day_lack
first_day_lack_df['date']=last_day
first_day_lack_df['时间']=first_day_lack_df['date']+' '+first_day_lack_df['time']
last_df=pd.read_csv(contract_main_pool[0],encoding='gbk')
# 确定之前的有没有夜盘
last_df['date']=last_df['时间'].str[:10]
last_df['time']=last_df['时间'].str[11:]
# 补夜盘数据
last_time_pool=last_df.loc[last_df['date']==last_day]['time'].values
last_day_have_date=[]
# 说明在上个交易日有数据
if last_time_pool.shape[0]>0:
print(f'期货品种{contract_kind}在前一个交易日{last_day}有夜盘数据,需要读取覆盖')
last_day_have_date=[x for x in last_time_pool]
if last_day_have_date:
for index in range(len(last_day_have_date)):
origanl_index=last_df.loc[(last_df['date']==last_day)&(last_df['time']==last_day_have_date[index])].index[0]
target_index=first_day_lack_df.loc[first_day_lack_df['time']==last_day_have_date[index]].index[0]
first_day_lack_df.iloc[target_index]=last_df.iloc[origanl_index]
else:
print(f'期货品种{contract_kind}在前一个交易日{last_day}没有夜盘数据,不需要读取覆盖')
print('直接使用np.nan填充上一个交易日的夜盘数据')
for index in range(first_day_lack_df.shape[0]):
combine_df=combine_df.append(first_day_lack_df.iloc[index])
combine_df['时间']=combine_df['时间'].str.replace('-','')
combine_df['date']=combine_df['date'].str.replace('-','')
combine_df.sort_values('时间',inplace=True)
# =================================缺失部分填充=========================================================================================
# combine_df=pd.concat([first_day_lack_df,combine_df])
# # ================================重新按时间排序========================================================================================
# combine_df=combine_df.sort_values('时间')
# ============================重新进行切割===============================================================================================
end_index=np.where(combine_df['time']==end_time)[0]+1
end_index=np.hstack(([0],end_index))
start=end_index[:-1]
end=end_index[1:]
# ==============================进行分割按照特定时间,明确col===============================================================================
col_type_list=['开','高','低','收','成交量','成交额','持仓量']
dir_name_list=['open','high','low','close','volume','amount','position']
    # this variable is not used any more
    # for sessions trading until 01:00 at night:
    #merge_df=pd.DataFrame({'time':with_night_01})
    # sessions trading until 02:30; this version has no call-auction times, i.e. time_0931_15 without 9:00 and 21:00
merge_df=pd.DataFrame({'time':time_0931_15})
combine_df['date']=combine_df['时间'].str[:8]
for index in range(len(col_type_list)):
col_type=col_type_list[index]
# 用来接收分col数据的容器
csv_df=pd.DataFrame()
for s_index,e_index in zip(start,end):
# =========================================截取每个交易日数据==============================================================================
res=combine_df.iloc[s_index:e_index,:]
one_date_df=pd.DataFrame(res[col_type].values.reshape(1,-1),columns=res['time'].values.tolist())
one_date_df['main_contract_code']=res.iloc[-1]['合约代码']
one_date_df['date']=res.iloc[-1]['date']
# =======================================设置输出格式====================================================================================
col_layout=['date']
col_layout=np.hstack((col_layout,res['time'].values.tolist()))
col_layout=np.hstack((col_layout,['main_contract_code']))
one_date_df=one_date_df[col_layout]
# =======================================合并数据========================================================================================
csv_df=pd.concat([csv_df,one_date_df])
        # ======================== append to the existing data =======================================
        # the date format has to be handled, otherwise the rows will not align
        # tested in a test file, hence the modified path
orignal_csv_df=pd.read_csv(orignal_clean_csv_path+contract_kind+'_1min_'+dir_name_list[index]+'.csv')
column_ouput_form=orignal_csv_df.columns.values
orignal_date_pool=pd.to_datetime(orignal_csv_df['date'],format='%Y-%m-%d').values
current_date_pool=pd.to_datetime(csv_df['date'],format='%Y-%m-%d').values
orignal_csv_df['date']=pd.to_datetime(orignal_csv_df['date'],format='%Y-%m-%d').dt.strftime('%Y-%m-%d')
csv_df['date']=pd.to_datetime(csv_df['date'],format='%Y%m%d').dt.strftime('%Y-%m-%d')
# check代码中的数字个数等于四个
main_code=csv_df['main_contract_code'].iloc[0]
main_code_num=csv_df['main_contract_code'].str.findall(r'[0-9]+').iloc[0][0]
if len(main_code_num)==3:
print(f'合约代码{main_code}缺少一位数字,将被替换')
csv_df['main_contract_code']=csv_df['main_contract_code'].str[:2]+month[0]+csv_df['main_contract_code'].str[2:]
main_code=csv_df['main_contract_code'].iloc[0]
print(f'合约代码{main_code}')
        # check for an intersection of dates; if there is one, stop, since it means the update was already run
intersection_pool=[date for date in orignal_date_pool if date in current_date_pool]
if not intersection_pool:
print(f'新旧数据没有时间交集,{contract_kind} {dir_name_list[index]} 将被添加到先前数据中')
orignal_csv_df=pd.concat([orignal_csv_df,csv_df])
orignal_csv_df.sort_values('date',inplace=True)
orignal_csv_df=orignal_csv_df[column_ouput_form]
orignal_csv_df.to_csv(orignal_clean_csv_path+contract_kind+'_1min_'+dir_name_list[index]+'.csv',index=False)
print(f'期货品种{contract_kind} {dir_name_list[index]} 完成')
else:
print(f'新旧数据的时间出现交集!!{contract_kind} {dir_name_list[index]} 将不会被添加到先前数据中')
|
normal
|
{
"blob_id": "1c2967c26c845281ceb46cc1d8c06768298ef6b6",
"index": 9407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef renew_commodity_future(year_month: str, contract_kind: str,\n main_code_path: str, rar_data_file_path: str, clean_data_path: str,\n time_range_path: str, end_date: str, commodity_bool=True):\n \"\"\"\n 用于更新月度的商品期货数据\n year_month:'201911'字符串年份和月份,对应的是FutAC_Min1_Std_后面的数字,如FutAC_Min1_Std_201911\n contract_kind:放对应品种的list 类似['A','B']\n main_code_path:对应存放主力合约的地方\n rar_data_file_path: 对应的是存放rar数据如FutAC_Min1_Std_201911.rar的位置,不包括对应的文件名\n clean_data_path:对应存放分钟数据的位置,处理好的新数据会追加到对应位置下的对应品种处\n time_range_path:放置交易时间文件的路径,包括文件名 如 D:/统一所有品种时间范围.csv\n end_date :'20200103' 今日的日期,用来请求tushare中的交易日历,数据的读取合并都是以交易日历的时间驱动\n commodity_bool:商品期货对应True,金融期货False,默认商品期货\n \"\"\"\n month = year_month\n if commodity_bool:\n file_name = rar_data_file_path + 'FutAC_Min1_Std_' + month + '.rar'\n else:\n file_name = rar_data_file_path + 'FutSF_Min1_Std_' + month + '.rar'\n orignial_path = main_code_path\n specifi_path = orignial_path + contract_kind + '_1day_main.npy'\n rar = rarfile.RarFile(file_name, pwd='www.jinshuyuan.net')\n orignal_clean_csv_path = clean_data_path\n pwd = 'www.jinshuyuan.net'\n data = np.load(specifi_path)\n time_0931_15 = pd.read_csv(time_range_path)['date'].values.tolist()\n rar.extractall(path=file_name.split('.')[0])\n pro = ts.pro_api('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6'\n )\n ts.set_token('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')\n date_df = pro.trade_cal(exchange='DCE', start_date='20100101', end_date\n =end_date)\n date_df = date_df.loc[date_df['is_open'] == 1]\n date_list = date_df['cal_date'].tolist()\n date_df = pd.DataFrame({'date': date_list})\n date_df['month'] = date_df['date'].str[:6]\n target_date = date_df.loc[date_df['month'] == month]\n target_date_index = target_date.index.values\n target_date = target_date['date'].values\n data = data.reshape(-1)\n contract_main_pool = data[target_date_index]\n contract_main_pool = (pd.Series(contract_main_pool).str.split('.').str[\n 0] + '.csv').values\n file_pools = os.listdir(file_name.split('.')[0])\n if contract_main_pool[0] not in file_pools:\n contract_main_pool = [contract_file.lower() for contract_file in\n contract_main_pool]\n if contract_main_pool[0] not in file_pools:\n print(f'找不到{contract_main_pool[0]}')\n contract_main_pool = (file_name.split('.')[0] + '/' + pd.Series(\n contract_main_pool)).values\n row_1 = ['市场代码', '合约代码', '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量']\n orignal_data = []\n orignal_data.append(row_1)\n for index in range(len(target_date)):\n date = target_date[index]\n one_file_path = contract_main_pool[index]\n df = pd.read_csv(one_file_path, encoding='gbk')\n df['date'] = df['时间'].str[:10]\n df['date2'] = df['date'].str.replace('-', '')\n result = df.loc[df['date2'] == date]\n if result.shape[0] > 0:\n for row_index in range(len(result)):\n target_row = result.iloc[row_index].tolist()\n clean_row = target_row[:-2]\n orignal_data.append(clean_row)\n print(f'{contract_kind} {date} finished!')\n else:\n print(f'没找到合约品种{contract_kind}在{date}')\n print(f'{contract_kind}在{month}月的主力合约数据读取完成')\n final_df = pd.DataFrame(orignal_data[1:], columns=orignal_data[0])\n final_df['date'] = final_df['时间'].str[:10]\n final_df_date = final_df['date'].unique()\n final_df['date'] = final_df['时间'].str[:10]\n final_df['time'] = final_df['时间'].str[10:].str.strip()\n final_df['时间'] = final_df['date'] + ' ' + final_df['time']\n final_df = final_df.sort_values('时间')\n final_df['合约代码'] = final_df['合约代码'].str.upper()\n final_df = final_df.sort_values('时间')\n 
final_df['transf_date'] = pd.to_datetime(final_df['date'])\n final_df.set_index('transf_date', inplace=True)\n combine_all_df = pd.DataFrame()\n final_df['date2'] = final_df['date'].str.replace('-', '')\n for date_index in range(len(target_date)):\n target_df = final_df.loc[final_df['date2'] == target_date[date_index]]\n target_num = len(target_df)\n theory_num = len(time_0931_15)\n if target_num > 0:\n have_time = target_df['time'].values.tolist()\n lack_time = [x for x in time_0931_15 if x not in have_time]\n if lack_time:\n print(f'{target_date[date_index]} 不连续')\n insert_array = np.empty(shape=(len(lack_time), 12))\n insert_array.fill(np.nan)\n insert_df = pd.DataFrame(insert_array, columns=['市场代码', '合约代码',\n '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量', 'date', 'time'])\n insert_df['date'] = target_date[date_index]\n insert_df['time'] = lack_time\n if len(lack_time) < len(time_0931_15):\n insert_df['合约代码'] = target_df['合约代码'].unique()[-1]\n combine_insert_df = pd.concat([target_df, insert_df])\n combine_all_df = pd.concat([combine_all_df, combine_insert_df])\n else:\n print(f'{target_date[date_index]}empty ')\n lack_time = [x for x in time_0931_15]\n insert_array = np.empty(shape=(len(lack_time), 12))\n insert_array.fill(np.nan)\n insert_df = pd.DataFrame(insert_array, columns=['市场代码', '合约代码',\n '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量', 'date', 'time'])\n insert_df['date'] = target_date[date_index]\n insert_df['time'] = lack_time\n combine_all_df = pd.concat([combine_all_df, insert_df])\n combine_all_df['时间'] = combine_all_df['date'] + ' ' + combine_all_df['time'\n ]\n combine_all_df = combine_all_df.sort_values('时间')\n combine_all_df.reset_index(inplace=True)\n combine_all_df = combine_all_df[['市场代码', '合约代码', '时间', '开', '高', '低',\n '收', '成交量', '成交额', '持仓量', 'date', 'time']]\n combine_all_df['时间'] = combine_all_df['时间'].str.replace('-', '')\n combine_all_df['date'] = combine_all_df['date'].str.replace('-', '')\n combine_df = combine_all_df.copy()\n contract_type = contract_kind\n combine_df = combine_df.sort_values('时间')\n end_time = '15:15:00'\n end_index = np.where(combine_df['time'] == end_time)[0] + 1\n end_index = np.hstack(([0], end_index))\n start = end_index[:-1]\n end = end_index[1:]\n last_day = date_df['date'].iloc[target_date_index[0] - 1]\n last_day = last_day[:4] + '-' + last_day[4:6] + '-' + last_day[6:]\n first_day_have = combine_df[start[0]:end[0]]['time'].values\n full_time = combine_df['time'].unique()\n full_time.sort()\n first_day_lack = [x for x in full_time[-179:]]\n first_day_lack.sort()\n lack_array = np.empty(shape=(len(first_day_lack), 12))\n lack_array.fill(np.nan)\n first_day_lack_df = pd.DataFrame(lack_array, columns=combine_df.columns)\n first_day_lack_df['time'] = first_day_lack\n first_day_lack_df['date'] = last_day\n first_day_lack_df['时间'] = first_day_lack_df['date'\n ] + ' ' + first_day_lack_df['time']\n last_df = pd.read_csv(contract_main_pool[0], encoding='gbk')\n last_df['date'] = last_df['时间'].str[:10]\n last_df['time'] = last_df['时间'].str[11:]\n last_time_pool = last_df.loc[last_df['date'] == last_day]['time'].values\n last_day_have_date = []\n if last_time_pool.shape[0] > 0:\n print(f'期货品种{contract_kind}在前一个交易日{last_day}有夜盘数据,需要读取覆盖')\n last_day_have_date = [x for x in last_time_pool]\n if last_day_have_date:\n for index in range(len(last_day_have_date)):\n origanl_index = last_df.loc[(last_df['date'] == last_day) & (\n last_df['time'] == last_day_have_date[index])].index[0]\n target_index = 
first_day_lack_df.loc[first_day_lack_df['time'] ==\n last_day_have_date[index]].index[0]\n first_day_lack_df.iloc[target_index] = last_df.iloc[origanl_index]\n else:\n print(f'期货品种{contract_kind}在前一个交易日{last_day}没有夜盘数据,不需要读取覆盖')\n print('直接使用np.nan填充上一个交易日的夜盘数据')\n for index in range(first_day_lack_df.shape[0]):\n combine_df = combine_df.append(first_day_lack_df.iloc[index])\n combine_df['时间'] = combine_df['时间'].str.replace('-', '')\n combine_df['date'] = combine_df['date'].str.replace('-', '')\n combine_df.sort_values('时间', inplace=True)\n end_index = np.where(combine_df['time'] == end_time)[0] + 1\n end_index = np.hstack(([0], end_index))\n start = end_index[:-1]\n end = end_index[1:]\n col_type_list = ['开', '高', '低', '收', '成交量', '成交额', '持仓量']\n dir_name_list = ['open', 'high', 'low', 'close', 'volume', 'amount',\n 'position']\n merge_df = pd.DataFrame({'time': time_0931_15})\n combine_df['date'] = combine_df['时间'].str[:8]\n for index in range(len(col_type_list)):\n col_type = col_type_list[index]\n csv_df = pd.DataFrame()\n for s_index, e_index in zip(start, end):\n res = combine_df.iloc[s_index:e_index, :]\n one_date_df = pd.DataFrame(res[col_type].values.reshape(1, -1),\n columns=res['time'].values.tolist())\n one_date_df['main_contract_code'] = res.iloc[-1]['合约代码']\n one_date_df['date'] = res.iloc[-1]['date']\n col_layout = ['date']\n col_layout = np.hstack((col_layout, res['time'].values.tolist()))\n col_layout = np.hstack((col_layout, ['main_contract_code']))\n one_date_df = one_date_df[col_layout]\n csv_df = pd.concat([csv_df, one_date_df])\n orignal_csv_df = pd.read_csv(orignal_clean_csv_path + contract_kind +\n '_1min_' + dir_name_list[index] + '.csv')\n column_ouput_form = orignal_csv_df.columns.values\n orignal_date_pool = pd.to_datetime(orignal_csv_df['date'], format=\n '%Y-%m-%d').values\n current_date_pool = pd.to_datetime(csv_df['date'], format='%Y-%m-%d'\n ).values\n orignal_csv_df['date'] = pd.to_datetime(orignal_csv_df['date'],\n format='%Y-%m-%d').dt.strftime('%Y-%m-%d')\n csv_df['date'] = pd.to_datetime(csv_df['date'], format='%Y%m%d'\n ).dt.strftime('%Y-%m-%d')\n main_code = csv_df['main_contract_code'].iloc[0]\n main_code_num = csv_df['main_contract_code'].str.findall('[0-9]+'\n ).iloc[0][0]\n if len(main_code_num) == 3:\n print(f'合约代码{main_code}缺少一位数字,将被替换')\n csv_df['main_contract_code'] = csv_df['main_contract_code'].str[:2\n ] + month[0] + csv_df['main_contract_code'].str[2:]\n main_code = csv_df['main_contract_code'].iloc[0]\n print(f'合约代码{main_code}')\n intersection_pool = [date for date in orignal_date_pool if date in\n current_date_pool]\n if not intersection_pool:\n print(\n f'新旧数据没有时间交集,{contract_kind} {dir_name_list[index]} 将被添加到先前数据中'\n )\n orignal_csv_df = pd.concat([orignal_csv_df, csv_df])\n orignal_csv_df.sort_values('date', inplace=True)\n orignal_csv_df = orignal_csv_df[column_ouput_form]\n orignal_csv_df.to_csv(orignal_clean_csv_path + contract_kind +\n '_1min_' + dir_name_list[index] + '.csv', index=False)\n print(f'期货品种{contract_kind} {dir_name_list[index]} 完成')\n else:\n print(\n f'新旧数据的时间出现交集!!{contract_kind} {dir_name_list[index]} 将不会被添加到先前数据中'\n )\n",
"step-3": "<mask token>\nyear_month = '201911'\ncontract_kind = 'NI'\nrar_data_file_path = (\n 'C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/'\n )\nmain_code_path = (\n 'C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/main/main_/'\n )\nclean_data_path = 'D:/1_min补充统一/'\nend_date = '20200107'\ntime_range_path = 'D:/统一所有品种时间范围.csv'\n\n\ndef renew_commodity_future(year_month: str, contract_kind: str,\n main_code_path: str, rar_data_file_path: str, clean_data_path: str,\n time_range_path: str, end_date: str, commodity_bool=True):\n \"\"\"\n 用于更新月度的商品期货数据\n year_month:'201911'字符串年份和月份,对应的是FutAC_Min1_Std_后面的数字,如FutAC_Min1_Std_201911\n contract_kind:放对应品种的list 类似['A','B']\n main_code_path:对应存放主力合约的地方\n rar_data_file_path: 对应的是存放rar数据如FutAC_Min1_Std_201911.rar的位置,不包括对应的文件名\n clean_data_path:对应存放分钟数据的位置,处理好的新数据会追加到对应位置下的对应品种处\n time_range_path:放置交易时间文件的路径,包括文件名 如 D:/统一所有品种时间范围.csv\n end_date :'20200103' 今日的日期,用来请求tushare中的交易日历,数据的读取合并都是以交易日历的时间驱动\n commodity_bool:商品期货对应True,金融期货False,默认商品期货\n \"\"\"\n month = year_month\n if commodity_bool:\n file_name = rar_data_file_path + 'FutAC_Min1_Std_' + month + '.rar'\n else:\n file_name = rar_data_file_path + 'FutSF_Min1_Std_' + month + '.rar'\n orignial_path = main_code_path\n specifi_path = orignial_path + contract_kind + '_1day_main.npy'\n rar = rarfile.RarFile(file_name, pwd='www.jinshuyuan.net')\n orignal_clean_csv_path = clean_data_path\n pwd = 'www.jinshuyuan.net'\n data = np.load(specifi_path)\n time_0931_15 = pd.read_csv(time_range_path)['date'].values.tolist()\n rar.extractall(path=file_name.split('.')[0])\n pro = ts.pro_api('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6'\n )\n ts.set_token('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')\n date_df = pro.trade_cal(exchange='DCE', start_date='20100101', end_date\n =end_date)\n date_df = date_df.loc[date_df['is_open'] == 1]\n date_list = date_df['cal_date'].tolist()\n date_df = pd.DataFrame({'date': date_list})\n date_df['month'] = date_df['date'].str[:6]\n target_date = date_df.loc[date_df['month'] == month]\n target_date_index = target_date.index.values\n target_date = target_date['date'].values\n data = data.reshape(-1)\n contract_main_pool = data[target_date_index]\n contract_main_pool = (pd.Series(contract_main_pool).str.split('.').str[\n 0] + '.csv').values\n file_pools = os.listdir(file_name.split('.')[0])\n if contract_main_pool[0] not in file_pools:\n contract_main_pool = [contract_file.lower() for contract_file in\n contract_main_pool]\n if contract_main_pool[0] not in file_pools:\n print(f'找不到{contract_main_pool[0]}')\n contract_main_pool = (file_name.split('.')[0] + '/' + pd.Series(\n contract_main_pool)).values\n row_1 = ['市场代码', '合约代码', '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量']\n orignal_data = []\n orignal_data.append(row_1)\n for index in range(len(target_date)):\n date = target_date[index]\n one_file_path = contract_main_pool[index]\n df = pd.read_csv(one_file_path, encoding='gbk')\n df['date'] = df['时间'].str[:10]\n df['date2'] = df['date'].str.replace('-', '')\n result = df.loc[df['date2'] == date]\n if result.shape[0] > 0:\n for row_index in range(len(result)):\n target_row = result.iloc[row_index].tolist()\n clean_row = target_row[:-2]\n orignal_data.append(clean_row)\n print(f'{contract_kind} {date} finished!')\n else:\n print(f'没找到合约品种{contract_kind}在{date}')\n print(f'{contract_kind}在{month}月的主力合约数据读取完成')\n final_df = pd.DataFrame(orignal_data[1:], columns=orignal_data[0])\n final_df['date'] = 
final_df['时间'].str[:10]\n final_df_date = final_df['date'].unique()\n final_df['date'] = final_df['时间'].str[:10]\n final_df['time'] = final_df['时间'].str[10:].str.strip()\n final_df['时间'] = final_df['date'] + ' ' + final_df['time']\n final_df = final_df.sort_values('时间')\n final_df['合约代码'] = final_df['合约代码'].str.upper()\n final_df = final_df.sort_values('时间')\n final_df['transf_date'] = pd.to_datetime(final_df['date'])\n final_df.set_index('transf_date', inplace=True)\n combine_all_df = pd.DataFrame()\n final_df['date2'] = final_df['date'].str.replace('-', '')\n for date_index in range(len(target_date)):\n target_df = final_df.loc[final_df['date2'] == target_date[date_index]]\n target_num = len(target_df)\n theory_num = len(time_0931_15)\n if target_num > 0:\n have_time = target_df['time'].values.tolist()\n lack_time = [x for x in time_0931_15 if x not in have_time]\n if lack_time:\n print(f'{target_date[date_index]} 不连续')\n insert_array = np.empty(shape=(len(lack_time), 12))\n insert_array.fill(np.nan)\n insert_df = pd.DataFrame(insert_array, columns=['市场代码', '合约代码',\n '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量', 'date', 'time'])\n insert_df['date'] = target_date[date_index]\n insert_df['time'] = lack_time\n if len(lack_time) < len(time_0931_15):\n insert_df['合约代码'] = target_df['合约代码'].unique()[-1]\n combine_insert_df = pd.concat([target_df, insert_df])\n combine_all_df = pd.concat([combine_all_df, combine_insert_df])\n else:\n print(f'{target_date[date_index]}empty ')\n lack_time = [x for x in time_0931_15]\n insert_array = np.empty(shape=(len(lack_time), 12))\n insert_array.fill(np.nan)\n insert_df = pd.DataFrame(insert_array, columns=['市场代码', '合约代码',\n '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量', 'date', 'time'])\n insert_df['date'] = target_date[date_index]\n insert_df['time'] = lack_time\n combine_all_df = pd.concat([combine_all_df, insert_df])\n combine_all_df['时间'] = combine_all_df['date'] + ' ' + combine_all_df['time'\n ]\n combine_all_df = combine_all_df.sort_values('时间')\n combine_all_df.reset_index(inplace=True)\n combine_all_df = combine_all_df[['市场代码', '合约代码', '时间', '开', '高', '低',\n '收', '成交量', '成交额', '持仓量', 'date', 'time']]\n combine_all_df['时间'] = combine_all_df['时间'].str.replace('-', '')\n combine_all_df['date'] = combine_all_df['date'].str.replace('-', '')\n combine_df = combine_all_df.copy()\n contract_type = contract_kind\n combine_df = combine_df.sort_values('时间')\n end_time = '15:15:00'\n end_index = np.where(combine_df['time'] == end_time)[0] + 1\n end_index = np.hstack(([0], end_index))\n start = end_index[:-1]\n end = end_index[1:]\n last_day = date_df['date'].iloc[target_date_index[0] - 1]\n last_day = last_day[:4] + '-' + last_day[4:6] + '-' + last_day[6:]\n first_day_have = combine_df[start[0]:end[0]]['time'].values\n full_time = combine_df['time'].unique()\n full_time.sort()\n first_day_lack = [x for x in full_time[-179:]]\n first_day_lack.sort()\n lack_array = np.empty(shape=(len(first_day_lack), 12))\n lack_array.fill(np.nan)\n first_day_lack_df = pd.DataFrame(lack_array, columns=combine_df.columns)\n first_day_lack_df['time'] = first_day_lack\n first_day_lack_df['date'] = last_day\n first_day_lack_df['时间'] = first_day_lack_df['date'\n ] + ' ' + first_day_lack_df['time']\n last_df = pd.read_csv(contract_main_pool[0], encoding='gbk')\n last_df['date'] = last_df['时间'].str[:10]\n last_df['time'] = last_df['时间'].str[11:]\n last_time_pool = last_df.loc[last_df['date'] == last_day]['time'].values\n last_day_have_date = []\n if last_time_pool.shape[0] > 0:\n 
print(f'期货品种{contract_kind}在前一个交易日{last_day}有夜盘数据,需要读取覆盖')\n last_day_have_date = [x for x in last_time_pool]\n if last_day_have_date:\n for index in range(len(last_day_have_date)):\n origanl_index = last_df.loc[(last_df['date'] == last_day) & (\n last_df['time'] == last_day_have_date[index])].index[0]\n target_index = first_day_lack_df.loc[first_day_lack_df['time'] ==\n last_day_have_date[index]].index[0]\n first_day_lack_df.iloc[target_index] = last_df.iloc[origanl_index]\n else:\n print(f'期货品种{contract_kind}在前一个交易日{last_day}没有夜盘数据,不需要读取覆盖')\n print('直接使用np.nan填充上一个交易日的夜盘数据')\n for index in range(first_day_lack_df.shape[0]):\n combine_df = combine_df.append(first_day_lack_df.iloc[index])\n combine_df['时间'] = combine_df['时间'].str.replace('-', '')\n combine_df['date'] = combine_df['date'].str.replace('-', '')\n combine_df.sort_values('时间', inplace=True)\n end_index = np.where(combine_df['time'] == end_time)[0] + 1\n end_index = np.hstack(([0], end_index))\n start = end_index[:-1]\n end = end_index[1:]\n col_type_list = ['开', '高', '低', '收', '成交量', '成交额', '持仓量']\n dir_name_list = ['open', 'high', 'low', 'close', 'volume', 'amount',\n 'position']\n merge_df = pd.DataFrame({'time': time_0931_15})\n combine_df['date'] = combine_df['时间'].str[:8]\n for index in range(len(col_type_list)):\n col_type = col_type_list[index]\n csv_df = pd.DataFrame()\n for s_index, e_index in zip(start, end):\n res = combine_df.iloc[s_index:e_index, :]\n one_date_df = pd.DataFrame(res[col_type].values.reshape(1, -1),\n columns=res['time'].values.tolist())\n one_date_df['main_contract_code'] = res.iloc[-1]['合约代码']\n one_date_df['date'] = res.iloc[-1]['date']\n col_layout = ['date']\n col_layout = np.hstack((col_layout, res['time'].values.tolist()))\n col_layout = np.hstack((col_layout, ['main_contract_code']))\n one_date_df = one_date_df[col_layout]\n csv_df = pd.concat([csv_df, one_date_df])\n orignal_csv_df = pd.read_csv(orignal_clean_csv_path + contract_kind +\n '_1min_' + dir_name_list[index] + '.csv')\n column_ouput_form = orignal_csv_df.columns.values\n orignal_date_pool = pd.to_datetime(orignal_csv_df['date'], format=\n '%Y-%m-%d').values\n current_date_pool = pd.to_datetime(csv_df['date'], format='%Y-%m-%d'\n ).values\n orignal_csv_df['date'] = pd.to_datetime(orignal_csv_df['date'],\n format='%Y-%m-%d').dt.strftime('%Y-%m-%d')\n csv_df['date'] = pd.to_datetime(csv_df['date'], format='%Y%m%d'\n ).dt.strftime('%Y-%m-%d')\n main_code = csv_df['main_contract_code'].iloc[0]\n main_code_num = csv_df['main_contract_code'].str.findall('[0-9]+'\n ).iloc[0][0]\n if len(main_code_num) == 3:\n print(f'合约代码{main_code}缺少一位数字,将被替换')\n csv_df['main_contract_code'] = csv_df['main_contract_code'].str[:2\n ] + month[0] + csv_df['main_contract_code'].str[2:]\n main_code = csv_df['main_contract_code'].iloc[0]\n print(f'合约代码{main_code}')\n intersection_pool = [date for date in orignal_date_pool if date in\n current_date_pool]\n if not intersection_pool:\n print(\n f'新旧数据没有时间交集,{contract_kind} {dir_name_list[index]} 将被添加到先前数据中'\n )\n orignal_csv_df = pd.concat([orignal_csv_df, csv_df])\n orignal_csv_df.sort_values('date', inplace=True)\n orignal_csv_df = orignal_csv_df[column_ouput_form]\n orignal_csv_df.to_csv(orignal_clean_csv_path + contract_kind +\n '_1min_' + dir_name_list[index] + '.csv', index=False)\n print(f'期货品种{contract_kind} {dir_name_list[index]} 完成')\n else:\n print(\n f'新旧数据的时间出现交集!!{contract_kind} {dir_name_list[index]} 将不会被添加到先前数据中'\n )\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom unrar import rarfile\nimport numpy as np\nimport pandas as pd\nimport tushare as ts\nimport os\nyear_month = '201911'\ncontract_kind = 'NI'\nrar_data_file_path = (\n 'C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/'\n )\nmain_code_path = (\n 'C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/main/main_/'\n )\nclean_data_path = 'D:/1_min补充统一/'\nend_date = '20200107'\ntime_range_path = 'D:/统一所有品种时间范围.csv'\n\n\ndef renew_commodity_future(year_month: str, contract_kind: str,\n main_code_path: str, rar_data_file_path: str, clean_data_path: str,\n time_range_path: str, end_date: str, commodity_bool=True):\n \"\"\"\n 用于更新月度的商品期货数据\n year_month:'201911'字符串年份和月份,对应的是FutAC_Min1_Std_后面的数字,如FutAC_Min1_Std_201911\n contract_kind:放对应品种的list 类似['A','B']\n main_code_path:对应存放主力合约的地方\n rar_data_file_path: 对应的是存放rar数据如FutAC_Min1_Std_201911.rar的位置,不包括对应的文件名\n clean_data_path:对应存放分钟数据的位置,处理好的新数据会追加到对应位置下的对应品种处\n time_range_path:放置交易时间文件的路径,包括文件名 如 D:/统一所有品种时间范围.csv\n end_date :'20200103' 今日的日期,用来请求tushare中的交易日历,数据的读取合并都是以交易日历的时间驱动\n commodity_bool:商品期货对应True,金融期货False,默认商品期货\n \"\"\"\n month = year_month\n if commodity_bool:\n file_name = rar_data_file_path + 'FutAC_Min1_Std_' + month + '.rar'\n else:\n file_name = rar_data_file_path + 'FutSF_Min1_Std_' + month + '.rar'\n orignial_path = main_code_path\n specifi_path = orignial_path + contract_kind + '_1day_main.npy'\n rar = rarfile.RarFile(file_name, pwd='www.jinshuyuan.net')\n orignal_clean_csv_path = clean_data_path\n pwd = 'www.jinshuyuan.net'\n data = np.load(specifi_path)\n time_0931_15 = pd.read_csv(time_range_path)['date'].values.tolist()\n rar.extractall(path=file_name.split('.')[0])\n pro = ts.pro_api('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6'\n )\n ts.set_token('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')\n date_df = pro.trade_cal(exchange='DCE', start_date='20100101', end_date\n =end_date)\n date_df = date_df.loc[date_df['is_open'] == 1]\n date_list = date_df['cal_date'].tolist()\n date_df = pd.DataFrame({'date': date_list})\n date_df['month'] = date_df['date'].str[:6]\n target_date = date_df.loc[date_df['month'] == month]\n target_date_index = target_date.index.values\n target_date = target_date['date'].values\n data = data.reshape(-1)\n contract_main_pool = data[target_date_index]\n contract_main_pool = (pd.Series(contract_main_pool).str.split('.').str[\n 0] + '.csv').values\n file_pools = os.listdir(file_name.split('.')[0])\n if contract_main_pool[0] not in file_pools:\n contract_main_pool = [contract_file.lower() for contract_file in\n contract_main_pool]\n if contract_main_pool[0] not in file_pools:\n print(f'找不到{contract_main_pool[0]}')\n contract_main_pool = (file_name.split('.')[0] + '/' + pd.Series(\n contract_main_pool)).values\n row_1 = ['市场代码', '合约代码', '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量']\n orignal_data = []\n orignal_data.append(row_1)\n for index in range(len(target_date)):\n date = target_date[index]\n one_file_path = contract_main_pool[index]\n df = pd.read_csv(one_file_path, encoding='gbk')\n df['date'] = df['时间'].str[:10]\n df['date2'] = df['date'].str.replace('-', '')\n result = df.loc[df['date2'] == date]\n if result.shape[0] > 0:\n for row_index in range(len(result)):\n target_row = result.iloc[row_index].tolist()\n clean_row = target_row[:-2]\n orignal_data.append(clean_row)\n print(f'{contract_kind} {date} finished!')\n else:\n print(f'没找到合约品种{contract_kind}在{date}')\n 
print(f'{contract_kind}在{month}月的主力合约数据读取完成')\n final_df = pd.DataFrame(orignal_data[1:], columns=orignal_data[0])\n final_df['date'] = final_df['时间'].str[:10]\n final_df_date = final_df['date'].unique()\n final_df['date'] = final_df['时间'].str[:10]\n final_df['time'] = final_df['时间'].str[10:].str.strip()\n final_df['时间'] = final_df['date'] + ' ' + final_df['time']\n final_df = final_df.sort_values('时间')\n final_df['合约代码'] = final_df['合约代码'].str.upper()\n final_df = final_df.sort_values('时间')\n final_df['transf_date'] = pd.to_datetime(final_df['date'])\n final_df.set_index('transf_date', inplace=True)\n combine_all_df = pd.DataFrame()\n final_df['date2'] = final_df['date'].str.replace('-', '')\n for date_index in range(len(target_date)):\n target_df = final_df.loc[final_df['date2'] == target_date[date_index]]\n target_num = len(target_df)\n theory_num = len(time_0931_15)\n if target_num > 0:\n have_time = target_df['time'].values.tolist()\n lack_time = [x for x in time_0931_15 if x not in have_time]\n if lack_time:\n print(f'{target_date[date_index]} 不连续')\n insert_array = np.empty(shape=(len(lack_time), 12))\n insert_array.fill(np.nan)\n insert_df = pd.DataFrame(insert_array, columns=['市场代码', '合约代码',\n '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量', 'date', 'time'])\n insert_df['date'] = target_date[date_index]\n insert_df['time'] = lack_time\n if len(lack_time) < len(time_0931_15):\n insert_df['合约代码'] = target_df['合约代码'].unique()[-1]\n combine_insert_df = pd.concat([target_df, insert_df])\n combine_all_df = pd.concat([combine_all_df, combine_insert_df])\n else:\n print(f'{target_date[date_index]}empty ')\n lack_time = [x for x in time_0931_15]\n insert_array = np.empty(shape=(len(lack_time), 12))\n insert_array.fill(np.nan)\n insert_df = pd.DataFrame(insert_array, columns=['市场代码', '合约代码',\n '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量', 'date', 'time'])\n insert_df['date'] = target_date[date_index]\n insert_df['time'] = lack_time\n combine_all_df = pd.concat([combine_all_df, insert_df])\n combine_all_df['时间'] = combine_all_df['date'] + ' ' + combine_all_df['time'\n ]\n combine_all_df = combine_all_df.sort_values('时间')\n combine_all_df.reset_index(inplace=True)\n combine_all_df = combine_all_df[['市场代码', '合约代码', '时间', '开', '高', '低',\n '收', '成交量', '成交额', '持仓量', 'date', 'time']]\n combine_all_df['时间'] = combine_all_df['时间'].str.replace('-', '')\n combine_all_df['date'] = combine_all_df['date'].str.replace('-', '')\n combine_df = combine_all_df.copy()\n contract_type = contract_kind\n combine_df = combine_df.sort_values('时间')\n end_time = '15:15:00'\n end_index = np.where(combine_df['time'] == end_time)[0] + 1\n end_index = np.hstack(([0], end_index))\n start = end_index[:-1]\n end = end_index[1:]\n last_day = date_df['date'].iloc[target_date_index[0] - 1]\n last_day = last_day[:4] + '-' + last_day[4:6] + '-' + last_day[6:]\n first_day_have = combine_df[start[0]:end[0]]['time'].values\n full_time = combine_df['time'].unique()\n full_time.sort()\n first_day_lack = [x for x in full_time[-179:]]\n first_day_lack.sort()\n lack_array = np.empty(shape=(len(first_day_lack), 12))\n lack_array.fill(np.nan)\n first_day_lack_df = pd.DataFrame(lack_array, columns=combine_df.columns)\n first_day_lack_df['time'] = first_day_lack\n first_day_lack_df['date'] = last_day\n first_day_lack_df['时间'] = first_day_lack_df['date'\n ] + ' ' + first_day_lack_df['time']\n last_df = pd.read_csv(contract_main_pool[0], encoding='gbk')\n last_df['date'] = last_df['时间'].str[:10]\n last_df['time'] = last_df['时间'].str[11:]\n 
last_time_pool = last_df.loc[last_df['date'] == last_day]['time'].values\n last_day_have_date = []\n if last_time_pool.shape[0] > 0:\n print(f'期货品种{contract_kind}在前一个交易日{last_day}有夜盘数据,需要读取覆盖')\n last_day_have_date = [x for x in last_time_pool]\n if last_day_have_date:\n for index in range(len(last_day_have_date)):\n origanl_index = last_df.loc[(last_df['date'] == last_day) & (\n last_df['time'] == last_day_have_date[index])].index[0]\n target_index = first_day_lack_df.loc[first_day_lack_df['time'] ==\n last_day_have_date[index]].index[0]\n first_day_lack_df.iloc[target_index] = last_df.iloc[origanl_index]\n else:\n print(f'期货品种{contract_kind}在前一个交易日{last_day}没有夜盘数据,不需要读取覆盖')\n print('直接使用np.nan填充上一个交易日的夜盘数据')\n for index in range(first_day_lack_df.shape[0]):\n combine_df = combine_df.append(first_day_lack_df.iloc[index])\n combine_df['时间'] = combine_df['时间'].str.replace('-', '')\n combine_df['date'] = combine_df['date'].str.replace('-', '')\n combine_df.sort_values('时间', inplace=True)\n end_index = np.where(combine_df['time'] == end_time)[0] + 1\n end_index = np.hstack(([0], end_index))\n start = end_index[:-1]\n end = end_index[1:]\n col_type_list = ['开', '高', '低', '收', '成交量', '成交额', '持仓量']\n dir_name_list = ['open', 'high', 'low', 'close', 'volume', 'amount',\n 'position']\n merge_df = pd.DataFrame({'time': time_0931_15})\n combine_df['date'] = combine_df['时间'].str[:8]\n for index in range(len(col_type_list)):\n col_type = col_type_list[index]\n csv_df = pd.DataFrame()\n for s_index, e_index in zip(start, end):\n res = combine_df.iloc[s_index:e_index, :]\n one_date_df = pd.DataFrame(res[col_type].values.reshape(1, -1),\n columns=res['time'].values.tolist())\n one_date_df['main_contract_code'] = res.iloc[-1]['合约代码']\n one_date_df['date'] = res.iloc[-1]['date']\n col_layout = ['date']\n col_layout = np.hstack((col_layout, res['time'].values.tolist()))\n col_layout = np.hstack((col_layout, ['main_contract_code']))\n one_date_df = one_date_df[col_layout]\n csv_df = pd.concat([csv_df, one_date_df])\n orignal_csv_df = pd.read_csv(orignal_clean_csv_path + contract_kind +\n '_1min_' + dir_name_list[index] + '.csv')\n column_ouput_form = orignal_csv_df.columns.values\n orignal_date_pool = pd.to_datetime(orignal_csv_df['date'], format=\n '%Y-%m-%d').values\n current_date_pool = pd.to_datetime(csv_df['date'], format='%Y-%m-%d'\n ).values\n orignal_csv_df['date'] = pd.to_datetime(orignal_csv_df['date'],\n format='%Y-%m-%d').dt.strftime('%Y-%m-%d')\n csv_df['date'] = pd.to_datetime(csv_df['date'], format='%Y%m%d'\n ).dt.strftime('%Y-%m-%d')\n main_code = csv_df['main_contract_code'].iloc[0]\n main_code_num = csv_df['main_contract_code'].str.findall('[0-9]+'\n ).iloc[0][0]\n if len(main_code_num) == 3:\n print(f'合约代码{main_code}缺少一位数字,将被替换')\n csv_df['main_contract_code'] = csv_df['main_contract_code'].str[:2\n ] + month[0] + csv_df['main_contract_code'].str[2:]\n main_code = csv_df['main_contract_code'].iloc[0]\n print(f'合约代码{main_code}')\n intersection_pool = [date for date in orignal_date_pool if date in\n current_date_pool]\n if not intersection_pool:\n print(\n f'新旧数据没有时间交集,{contract_kind} {dir_name_list[index]} 将被添加到先前数据中'\n )\n orignal_csv_df = pd.concat([orignal_csv_df, csv_df])\n orignal_csv_df.sort_values('date', inplace=True)\n orignal_csv_df = orignal_csv_df[column_ouput_form]\n orignal_csv_df.to_csv(orignal_clean_csv_path + contract_kind +\n '_1min_' + dir_name_list[index] + '.csv', index=False)\n print(f'期货品种{contract_kind} {dir_name_list[index]} 完成')\n else:\n print(\n 
f'新旧数据的时间出现交集!!{contract_kind} {dir_name_list[index]} 将不会被添加到先前数据中'\n )\n",
"step-5": "import numpy as np \nimport pandas as pd\nfrom unrar import rarfile\nimport numpy as np \nimport pandas as pd\nimport tushare as ts\nimport os\n\nyear_month='201911'\ncontract_kind='NI'\nrar_data_file_path='C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/'\nmain_code_path='C:/Users/lenovo/Documents/WeChat Files/yiranli13/FileStorage/File/2020-01/main/main_/'\nclean_data_path='D:/1_min补充统一/'\nend_date='20200107'\ntime_range_path='D:/统一所有品种时间范围.csv'\n# save_month_fill_data_path='D:/1_min补充统一/'+contract_kind+'主力连续'+'_'+month+'.csv'\ndef renew_commodity_future(year_month:str,contract_kind:str,main_code_path:str,rar_data_file_path:str,clean_data_path:str,time_range_path:str,end_date:str,commodity_bool=True):\n '''\n 用于更新月度的商品期货数据\n year_month:'201911'字符串年份和月份,对应的是FutAC_Min1_Std_后面的数字,如FutAC_Min1_Std_201911\n contract_kind:放对应品种的list 类似['A','B']\n main_code_path:对应存放主力合约的地方\n rar_data_file_path: 对应的是存放rar数据如FutAC_Min1_Std_201911.rar的位置,不包括对应的文件名\n clean_data_path:对应存放分钟数据的位置,处理好的新数据会追加到对应位置下的对应品种处\n time_range_path:放置交易时间文件的路径,包括文件名 如 D:/统一所有品种时间范围.csv\n end_date :'20200103' 今日的日期,用来请求tushare中的交易日历,数据的读取合并都是以交易日历的时间驱动\n commodity_bool:商品期货对应True,金融期货False,默认商品期货\n '''\n month=year_month\n if commodity_bool: \n file_name=rar_data_file_path+'FutAC_Min1_Std_'+month+'.rar'\n else:\n file_name=rar_data_file_path+'FutSF_Min1_Std_'+month+'.rar'\n orignial_path=main_code_path\n specifi_path=orignial_path+contract_kind+'_1day_main.npy'\n rar = rarfile.RarFile(file_name,pwd='www.jinshuyuan.net')\n # 原始的处理好的数据\n orignal_clean_csv_path=clean_data_path\n pwd='www.jinshuyuan.net'\n data=np.load(specifi_path)\n time_0931_15=pd.read_csv(time_range_path)['date'].values.tolist()\n rar.extractall(path=file_name.split('.')[0])\n # 首先需要输入end_date 确保截取的时间长度和main主力合约的时间对齐\n # 按照月份确定位置\n pro = ts.pro_api('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')\n ts.set_token('3d832df2966f27c20e6ff243ab1d53a35a4adc1c64b353cc370ac7d6')\n date_df=pro.trade_cal(exchange='DCE', start_date='20100101', end_date=end_date)\n date_df=date_df.loc[date_df['is_open']==1]\n date_list=date_df['cal_date'].tolist()\n # ==========================================================================\n # 针对的是201911月数据,对应的合约index 放在 target_date_index中\n date_df=pd.DataFrame({'date':date_list})\n date_df['month']=date_df['date'].str[:6]\n target_date=date_df.loc[date_df['month']==month]\n target_date_index=target_date.index.values\n target_date=target_date['date'].values\n # 获取对应目标\n data=data.reshape(-1)\n contract_main_pool=data[target_date_index]\n # 去掉交易所的代码编号\n contract_main_pool=(pd.Series(contract_main_pool).str.split('.').str[0]+'.csv').values\n file_pools=os.listdir(file_name.split('.')[0])\n # 郑州期货交易所是大写,其它都是小写,这里需要逻辑判断\n if contract_main_pool[0] not in file_pools:\n contract_main_pool=[contract_file.lower() for contract_file in contract_main_pool]\n if contract_main_pool[0] not in file_pools:\n print(f'找不到{contract_main_pool[0]}')\n # 读取好所有的路径\n contract_main_pool=(file_name.split('.')[0]+'/'+pd.Series(contract_main_pool)).values\n # (len(target_date),contract_main_pool.shape[0])\n row_1=['市场代码','合约代码',\t'时间',\t'开','高',\t'低',\t'收',\t'成交量',\t'成交额',\t'持仓量']\n orignal_data=[]\n orignal_data.append(row_1)\n for index in range(len(target_date)):\n date=target_date[index]\n one_file_path=contract_main_pool[index]\n df=pd.read_csv(one_file_path,encoding='gbk')\n df['date']=df['时间'].str[:10]\n df['date2']=df['date'].str.replace('-','')\n result=df.loc[df['date2']==date]\n if result.shape[0]>0:\n for 
row_index in range(len(result)):\n target_row=result.iloc[row_index].tolist()\n clean_row=target_row[:-2]\n orignal_data.append(clean_row)\n print(f'{contract_kind} {date} finished!')\n else:\n print(f'没找到合约品种{contract_kind}在{date}')\n print(f'{contract_kind}在{month}月的主力合约数据读取完成')\n final_df=pd.DataFrame(orignal_data[1:],columns=orignal_data[0])\n\n final_df['date']=final_df['时间'].str[:10]\n final_df_date=final_df['date'].unique()\n\n final_df['date']=final_df['时间'].str[:10]\n final_df['time']=final_df['时间'].str[10:].str.strip()\n final_df['时间']=final_df['date']+' '+final_df['time']\n final_df=final_df.sort_values('时间')\n final_df['合约代码']=final_df['合约代码'].str.upper()\n final_df=final_df.sort_values('时间')\n # ===============================增加了从constant_time进行截取================================\n final_df['transf_date']=pd.to_datetime(final_df['date'])\n final_df.set_index('transf_date',inplace=True)\n combine_all_df=pd.DataFrame()\n final_df['date2']=final_df['date'].str.replace('-','')\n # 按月进行填充\n # 设置了存放按月填充的路径\n for date_index in range(len(target_date)):\n\n #按日期进行分割\n target_df=final_df.loc[final_df['date2']==target_date[date_index]]\n #分割到的长度放入容器中\n target_num=len(target_df)\n #理论长度\n theory_num=len(time_0931_15)\n #实际上两种情况:1.是交易日但完全没有数据2.是交易日,只有部分数据 3.是交易日,数据也是完整的\n if target_num>0:\n #开始区分2,3情况\n have_time=target_df['time'].values.tolist()\n lack_time=[x for x in time_0931_15 if x not in have_time]\n #检查是不是情况2\n if lack_time:\n print(f'{target_date[date_index]} 不连续')\n #一共12列,先全部填充nan的时候,最后再把已知填入\n insert_array=np.empty(shape=(len(lack_time),12))\n insert_array.fill(np.nan)\n insert_df=pd.DataFrame(insert_array,columns=['市场代码','合约代码','时间','开','高','低','收','成交量','成交额','持仓量','date','time'])\n insert_df['date']=target_date[date_index]\n insert_df['time']=lack_time\n #缺少时间的个数小于time_0931_15则说明,当天并不是完全没数据,只是部分数据缺失,因此要对合约代码进行填充\n if len(lack_time)<len(time_0931_15):\n \n insert_df['合约代码']=target_df['合约代码'].unique()[-1]\n #生成一天完整的数据\n combine_insert_df=pd.concat([target_df,insert_df])\n #将数据添加到容器中\n combine_all_df=pd.concat([combine_all_df,combine_insert_df]) \n \n #完全没有数据,直接填充 \n else:\n print(f'{target_date[date_index]}empty ')\n lack_time=[x for x in time_0931_15]\n #一共12列,先全部填充nan的时候,最后再把已知填入\n insert_array=np.empty(shape=(len(lack_time),12))\n insert_array.fill(np.nan)\n insert_df=pd.DataFrame(insert_array,columns=['市场代码','合约代码','时间','开','高','低','收','成交量','成交额','持仓量','date','time'])\n insert_df['date']=target_date[date_index]\n insert_df['time']=lack_time\n #将数据添加到容器\n combine_all_df=pd.concat([combine_all_df,insert_df])\n combine_all_df['时间']=combine_all_df['date']+' '+combine_all_df['time']\n #调整时间\n combine_all_df=combine_all_df.sort_values('时间')\n\n combine_all_df.reset_index(inplace=True)\n #数据输出,按设定的顺序\n combine_all_df=combine_all_df[['市场代码', '合约代码', '时间', '开', '高', '低', '收', '成交量', '成交额', '持仓量','date','time']]\n combine_all_df['时间']=combine_all_df['时间'].str.replace('-','')\n combine_all_df['date']=combine_all_df['date'].str.replace('-','')\n # combine_all_df.to_csv(save_month_fill_data_path,index=False,encoding='utf-8-sig')\n # ==========================储存数据=================================================\n combine_df=combine_all_df.copy()\n contract_type=contract_kind\n combine_df=combine_df.sort_values('时间')\n # ====================================================================开始截取============================================================\n # end_time+1其实是可以作为每次截取的起点,终点下一个就是起点,不过要加上0,而终点的位置也可以是end_time+1,因为end_time+1只能取end_time\n # 按照下午15:15统一截取\n end_time='15:15:00'\n 
end_index=np.where(combine_df['time']==end_time)[0]+1\n end_index=np.hstack(([0],end_index))\n start=end_index[:-1]\n end=end_index[1:]\n # ================================================================缺失第一个交易日前一天的夜盘数据==========================================\n # 这里的选择构造一个虚拟的时间戳,来满足缺失的夜盘数据\n # 按照上一步的截取方法,第一个交易日缺少前一天的夜盘数据\n last_day=date_df['date'].iloc[target_date_index[0]-1]\n last_day=last_day[:4]+'-'+last_day[4:6]+'-'+last_day[6:]\n first_day_have=combine_df[start[0]:end[0]]['time'].values\n full_time=combine_df['time'].unique()\n full_time.sort()\n first_day_lack=[x for x in full_time[-179:]]\n first_day_lack.sort()\n lack_array=np.empty(shape=(len(first_day_lack),12))\n lack_array.fill(np.nan)\n # ===============================准备缺失部分df==========================================================================================\n first_day_lack_df=pd.DataFrame(lack_array,columns=combine_df.columns)\n first_day_lack_df['time']=first_day_lack\n first_day_lack_df['date']=last_day\n first_day_lack_df['时间']=first_day_lack_df['date']+' '+first_day_lack_df['time']\n\n last_df=pd.read_csv(contract_main_pool[0],encoding='gbk')\n # 确定之前的有没有夜盘\n last_df['date']=last_df['时间'].str[:10]\n last_df['time']=last_df['时间'].str[11:]\n # 补夜盘数据\n last_time_pool=last_df.loc[last_df['date']==last_day]['time'].values\n\n last_day_have_date=[]\n # 说明在上个交易日有数据\n if last_time_pool.shape[0]>0:\n \n print(f'期货品种{contract_kind}在前一个交易日{last_day}有夜盘数据,需要读取覆盖')\n last_day_have_date=[x for x in last_time_pool]\n if last_day_have_date:\n for index in range(len(last_day_have_date)):\n origanl_index=last_df.loc[(last_df['date']==last_day)&(last_df['time']==last_day_have_date[index])].index[0]\n target_index=first_day_lack_df.loc[first_day_lack_df['time']==last_day_have_date[index]].index[0]\n first_day_lack_df.iloc[target_index]=last_df.iloc[origanl_index]\n else:\n print(f'期货品种{contract_kind}在前一个交易日{last_day}没有夜盘数据,不需要读取覆盖')\n print('直接使用np.nan填充上一个交易日的夜盘数据')\n for index in range(first_day_lack_df.shape[0]):\n combine_df=combine_df.append(first_day_lack_df.iloc[index])\n combine_df['时间']=combine_df['时间'].str.replace('-','')\n combine_df['date']=combine_df['date'].str.replace('-','')\n combine_df.sort_values('时间',inplace=True)\n # =================================缺失部分填充=========================================================================================\n # combine_df=pd.concat([first_day_lack_df,combine_df])\n # # ================================重新按时间排序========================================================================================\n # combine_df=combine_df.sort_values('时间')\n # ============================重新进行切割===============================================================================================\n end_index=np.where(combine_df['time']==end_time)[0]+1\n end_index=np.hstack(([0],end_index))\n start=end_index[:-1]\n end=end_index[1:]\n\n # ==============================进行分割按照特定时间,明确col===============================================================================\n\n col_type_list=['开','高','低','收','成交量','成交额','持仓量']\n dir_name_list=['open','high','low','close','volume','amount','position']\n #这个变量现在没有用\n #交易到凌晨01\n #merge_df=pd.DataFrame({'time':with_night_01})\n #交易到凌晨0230,version中没有集合竞价时间,time_0931_15去掉9:00,21:00\n merge_df=pd.DataFrame({'time':time_0931_15})\n\n combine_df['date']=combine_df['时间'].str[:8]\n for index in range(len(col_type_list)):\n\n col_type=col_type_list[index]\n # 用来接收分col数据的容器\n csv_df=pd.DataFrame()\n for s_index,e_index in zip(start,end):\n\n # 
=========================================截取每个交易日数据==============================================================================\n res=combine_df.iloc[s_index:e_index,:]\n one_date_df=pd.DataFrame(res[col_type].values.reshape(1,-1),columns=res['time'].values.tolist())\n one_date_df['main_contract_code']=res.iloc[-1]['合约代码']\n one_date_df['date']=res.iloc[-1]['date']\n # =======================================设置输出格式====================================================================================\n\n col_layout=['date']\n col_layout=np.hstack((col_layout,res['time'].values.tolist()))\n col_layout=np.hstack((col_layout,['main_contract_code']))\n one_date_df=one_date_df[col_layout]\n # =======================================合并数据========================================================================================\n csv_df=pd.concat([csv_df,one_date_df])\n # ========================追加原始数据=======================================\n # 时间问题需要处理,不然对不齐\n # 在测试文件中测试,所以修改了路径\n orignal_csv_df=pd.read_csv(orignal_clean_csv_path+contract_kind+'_1min_'+dir_name_list[index]+'.csv')\n column_ouput_form=orignal_csv_df.columns.values\n orignal_date_pool=pd.to_datetime(orignal_csv_df['date'],format='%Y-%m-%d').values\n current_date_pool=pd.to_datetime(csv_df['date'],format='%Y-%m-%d').values\n orignal_csv_df['date']=pd.to_datetime(orignal_csv_df['date'],format='%Y-%m-%d').dt.strftime('%Y-%m-%d')\n csv_df['date']=pd.to_datetime(csv_df['date'],format='%Y%m%d').dt.strftime('%Y-%m-%d')\n # check代码中的数字个数等于四个\n main_code=csv_df['main_contract_code'].iloc[0]\n main_code_num=csv_df['main_contract_code'].str.findall(r'[0-9]+').iloc[0][0]\n if len(main_code_num)==3:\n print(f'合约代码{main_code}缺少一位数字,将被替换')\n csv_df['main_contract_code']=csv_df['main_contract_code'].str[:2]+month[0]+csv_df['main_contract_code'].str[2:]\n main_code=csv_df['main_contract_code'].iloc[0]\n print(f'合约代码{main_code}')\n # 查看有没有交集,如果有交集会停止,说明进行了重复操作\n \n intersection_pool=[date for date in orignal_date_pool if date in current_date_pool]\n if not intersection_pool:\n print(f'新旧数据没有时间交集,{contract_kind} {dir_name_list[index]} 将被添加到先前数据中')\n orignal_csv_df=pd.concat([orignal_csv_df,csv_df]) \n orignal_csv_df.sort_values('date',inplace=True)\n orignal_csv_df=orignal_csv_df[column_ouput_form]\n orignal_csv_df.to_csv(orignal_clean_csv_path+contract_kind+'_1min_'+dir_name_list[index]+'.csv',index=False)\n print(f'期货品种{contract_kind} {dir_name_list[index]} 完成')\n else:\n print(f'新旧数据的时间出现交集!!{contract_kind} {dir_name_list[index]} 将不会被添加到先前数据中')\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from rest_framework import serializers
from urlshortner.models import UrlShortnerModel
from urlshortner.constants import HOST
class UrlShortRequest(serializers.Serializer):
url = serializers.CharField(required=True, max_length=255) # Long Url
expiry = serializers.DateTimeField(required=False)
class UrlLongRequest(serializers.Serializer):
url = serializers.CharField(required=True, max_length=64) # Short Url
def validate_url(self, url):
if url.startswith(HOST):
return url
else:
            raise serializers.ValidationError("Invalid short URL")
class UrlShortResponse(serializers.ModelSerializer):
class Meta:
model = UrlShortnerModel
fields = "__all__"
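

# --- Added usage sketch (not part of the original module). It assumes a configured
# Django project where HOST is something like "http://localhost:8000/"; the helper
# name below is hypothetical and only illustrates how UrlLongRequest is meant to be called.
def _example_is_valid_short_url(raw_url: str) -> bool:
    serializer = UrlLongRequest(data={"url": raw_url})
    return serializer.is_valid()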
|
normal
|
{
"blob_id": "6c16afe89d5d0fd6aa6911e3de9e9cebb57bf35e",
"index": 1752,
"step-1": "<mask token>\n\n\nclass UrlLongRequest(serializers.Serializer):\n url = serializers.CharField(required=True, max_length=64)\n\n def validate_url(self, url):\n if url.startswith(HOST):\n return url\n else:\n return serializers.ValidationError('Invalid short URL')\n\n\nclass UrlShortResponse(serializers.ModelSerializer):\n\n\n class Meta:\n model = UrlShortnerModel\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass UrlShortRequest(serializers.Serializer):\n <mask token>\n <mask token>\n\n\nclass UrlLongRequest(serializers.Serializer):\n url = serializers.CharField(required=True, max_length=64)\n\n def validate_url(self, url):\n if url.startswith(HOST):\n return url\n else:\n return serializers.ValidationError('Invalid short URL')\n\n\nclass UrlShortResponse(serializers.ModelSerializer):\n\n\n class Meta:\n model = UrlShortnerModel\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass UrlShortRequest(serializers.Serializer):\n url = serializers.CharField(required=True, max_length=255)\n expiry = serializers.DateTimeField(required=False)\n\n\nclass UrlLongRequest(serializers.Serializer):\n url = serializers.CharField(required=True, max_length=64)\n\n def validate_url(self, url):\n if url.startswith(HOST):\n return url\n else:\n return serializers.ValidationError('Invalid short URL')\n\n\nclass UrlShortResponse(serializers.ModelSerializer):\n\n\n class Meta:\n model = UrlShortnerModel\n fields = '__all__'\n",
"step-4": "from rest_framework import serializers\nfrom urlshortner.models import UrlShortnerModel\nfrom urlshortner.constants import HOST\n\n\nclass UrlShortRequest(serializers.Serializer):\n url = serializers.CharField(required=True, max_length=255)\n expiry = serializers.DateTimeField(required=False)\n\n\nclass UrlLongRequest(serializers.Serializer):\n url = serializers.CharField(required=True, max_length=64)\n\n def validate_url(self, url):\n if url.startswith(HOST):\n return url\n else:\n return serializers.ValidationError('Invalid short URL')\n\n\nclass UrlShortResponse(serializers.ModelSerializer):\n\n\n class Meta:\n model = UrlShortnerModel\n fields = '__all__'\n",
"step-5": "from rest_framework import serializers\nfrom urlshortner.models import UrlShortnerModel\nfrom urlshortner.constants import HOST\n\n\nclass UrlShortRequest(serializers.Serializer):\n url = serializers.CharField(required=True, max_length=255) # Long Url\n expiry = serializers.DateTimeField(required=False)\n\n\nclass UrlLongRequest(serializers.Serializer):\n url = serializers.CharField(required=True, max_length=64) # Short Url\n\n def validate_url(self, url):\n if url.startswith(HOST):\n return url\n else:\n return serializers.ValidationError(\"Invalid short URL\")\n\n\nclass UrlShortResponse(serializers.ModelSerializer):\n class Meta:\n model = UrlShortnerModel\n fields = \"__all__\"\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
import json
from typing import Dict
from pandas import DataFrame, concat, Series
from edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM
ACTIVITY_TYPE_STATE = "Submission State Change"
ACTIVITY_TYPE_GRADE = "Submission Grade Change"
def submissions_to_user_submission_activities_dfs(
submissions_df: DataFrame,
) -> Dict[str, DataFrame]:
"""
Convert a Submission API DataFrame to a Dict of UserActivity
UDM DataFrames grouped by source system section id.
Parameters
----------
submissions_df: DataFrame
is a Submission API DataFrame
Returns
-------
Dict[str, DataFrame] LMS UDM UserActivity DataFrames
grouped by source system section id
Notes
-----
UserActivity DataFrame columns are:
ActivityDateTime: The date/time the activity occurred
ActivityStatus: The activity status
ActivityTimeInMinutes: The total activity time in minutes
ActivityType: The type of activity, here "Submission" or "Grade"
AssignmentIdentifier: A unique numeric identifier assigned to the assignment
Content: Content associated with the activity
LMSSectionIdentifier: A unique numeric identifier assigned to the section
SourceSystem: The system code or name providing the user activity data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a
user activity by the source system
LMSUserIdentifier: A unique numeric identifier assigned to the user
CreateDate: Date this record was created in the extractor
LastModifiedDate: Date this record was last updated in the extractor
"""
assert "submissionHistory" in submissions_df.columns
assert "id" in submissions_df.columns
assert "courseId" in submissions_df.columns
assert "courseWorkId" in submissions_df.columns
# convert json-like submissionHistory string to list of dicts
submissions_df["submissionHistory"] = submissions_df["submissionHistory"].apply(lambda json_like: json.loads(json_like.replace("'", '"')))
submissions_df["AssignmentIdentifier"] = submissions_df[
["courseId", "courseWorkId"]
].agg("-".join, axis=1)
submissions_df = submissions_df[["id", "courseId", "courseWorkId", "submissionHistory", "AssignmentIdentifier", "CreateDate", "LastModifiedDate"]]
# explode submissionHistory lists into rows with other columns duplicated
history_df = submissions_df.explode(column="submissionHistory") # type: ignore
# expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns
history_df = history_df["submissionHistory"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer')
history_df.drop(columns=["submissionHistory"], inplace=True)
# expand stateHistory (can assume exists, should always have at least one "CREATED" entry)
user_submission_df = concat([history_df, history_df["stateHistory"].apply(Series)], axis=1)
user_submission_df.dropna(subset=["stateHistory"], inplace=True)
# enrich stateHistory
user_submission_df["SourceSystemIdentifier"] = "S-" + user_submission_df[
["courseId", "courseWorkId", "id", "stateTimestamp"]
].agg("-".join, axis=1)
user_submission_df = user_submission_df[
[
"SourceSystemIdentifier",
"AssignmentIdentifier",
"stateTimestamp",
"state",
"courseId",
"actorUserId",
"CreateDate",
"LastModifiedDate"
]
]
user_submission_df = user_submission_df.rename(
columns={
"stateTimestamp": "ActivityDateTime",
"state": "ActivityStatus",
"courseId": "LMSSectionIdentifier",
"actorUserId": "LMSUserIdentifier",
}
)
user_submission_df["ActivityType"] = ACTIVITY_TYPE_STATE
# expand gradeHistory if exists
if "gradeHistory" in history_df:
grade_history_df = concat([history_df, history_df["gradeHistory"].apply(Series)], axis=1)
grade_history_df.dropna(subset=["gradeHistory"], inplace=True)
# enrich gradeHistory
grade_history_df["SourceSystemIdentifier"] = "G-" + grade_history_df[
["courseId", "courseWorkId", "id", "gradeTimestamp"]
].agg("-".join, axis=1)
grade_history_df = grade_history_df[
[
"SourceSystemIdentifier",
"AssignmentIdentifier",
"gradeTimestamp",
"gradeChangeType",
"courseId",
"actorUserId",
"CreateDate",
"LastModifiedDate"
]
]
grade_history_df = grade_history_df.rename(
columns={
"gradeTimestamp": "ActivityDateTime",
"gradeChangeType": "ActivityStatus",
"courseId": "LMSSectionIdentifier",
"actorUserId": "LMSUserIdentifier",
}
)
grade_history_df["ActivityType"] = ACTIVITY_TYPE_GRADE
# combine with stateHistory
user_submission_df = user_submission_df.append(grade_history_df)
# teacher actions can show up on student histories and vice-versa
user_submission_df.drop_duplicates(subset=["SourceSystemIdentifier"], inplace=True)
# finish with common columns
user_submission_df["ActivityTimeInMinutes"] = ""
user_submission_df["Content"] = ""
user_submission_df["SourceSystem"] = SOURCE_SYSTEM
user_submission_df["SourceCreateDate"] = "" # No create date available from API
user_submission_df["SourceLastModifiedDate"] = "" # No modified date available from API
# group by section id as a Dict of DataFrames
result: Dict[str, DataFrame] = dict(
tuple(user_submission_df.groupby(["LMSSectionIdentifier"]))
)
return result
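

# --- Added illustration (not part of the extractor): a tiny, self-contained demo of the
# explode + apply(Series) pattern used above, with made-up data, so the row-expansion
# step is easier to follow. The toy column values are assumptions for illustration only.
if __name__ == "__main__":
    _demo = DataFrame({"id": ["s1"], "submissionHistory": [[{"stateHistory": {"state": "CREATED"}}]]})
    _exploded = _demo.explode(column="submissionHistory")
    _expanded = _exploded["submissionHistory"].apply(Series).merge(
        _exploded, left_index=True, right_index=True, how="outer"
    )
    print(_expanded.columns.tolist())  # ['stateHistory', 'id', 'submissionHistory']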
|
normal
|
{
"blob_id": "d6a760774b45454c959c2932d7b28deee7f81872",
"index": 318,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = 
grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-3": "<mask token>\nACTIVITY_TYPE_STATE = 'Submission State Change'\nACTIVITY_TYPE_GRADE = 'Submission Grade Change'\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 
'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-4": "import json\nfrom typing import Dict\nfrom pandas import DataFrame, concat, Series\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\nACTIVITY_TYPE_STATE = 'Submission State Change'\nACTIVITY_TYPE_GRADE = 'Submission Grade Change'\n\n\ndef submissions_to_user_submission_activities_dfs(submissions_df: DataFrame\n ) ->Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert 'submissionHistory' in submissions_df.columns\n assert 'id' in submissions_df.columns\n assert 'courseId' in submissions_df.columns\n assert 'courseWorkId' in submissions_df.columns\n submissions_df['submissionHistory'] = submissions_df['submissionHistory'\n ].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df['AssignmentIdentifier'] = submissions_df[['courseId',\n 'courseWorkId']].agg('-'.join, axis=1)\n submissions_df = submissions_df[['id', 'courseId', 'courseWorkId',\n 'submissionHistory', 'AssignmentIdentifier', 'CreateDate',\n 'LastModifiedDate']]\n history_df = submissions_df.explode(column='submissionHistory')\n history_df = history_df['submissionHistory'].apply(Series).merge(history_df\n , left_index=True, right_index=True, how='outer')\n history_df.drop(columns=['submissionHistory'], inplace=True)\n user_submission_df = concat([history_df, history_df['stateHistory'].\n apply(Series)], axis=1)\n user_submission_df.dropna(subset=['stateHistory'], inplace=True)\n user_submission_df['SourceSystemIdentifier'] = 'S-' + user_submission_df[[\n 'courseId', 'courseWorkId', 'id', 'stateTimestamp']].agg('-'.join,\n axis=1)\n user_submission_df = user_submission_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'stateTimestamp', 'state', 'courseId',\n 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n user_submission_df = user_submission_df.rename(columns={\n 'stateTimestamp': 'ActivityDateTime', 'state': 'ActivityStatus',\n 'courseId': 'LMSSectionIdentifier', 'actorUserId': 'LMSUserIdentifier'}\n )\n user_submission_df['ActivityType'] = ACTIVITY_TYPE_STATE\n if 'gradeHistory' in history_df:\n grade_history_df = concat([history_df, history_df['gradeHistory'].\n apply(Series)], axis=1)\n grade_history_df.dropna(subset=['gradeHistory'], inplace=True)\n grade_history_df['SourceSystemIdentifier'] = 'G-' + grade_history_df[[\n 'courseId', 'courseWorkId', 'id', 'gradeTimestamp']].agg('-'.\n 
join, axis=1)\n grade_history_df = grade_history_df[['SourceSystemIdentifier',\n 'AssignmentIdentifier', 'gradeTimestamp', 'gradeChangeType',\n 'courseId', 'actorUserId', 'CreateDate', 'LastModifiedDate']]\n grade_history_df = grade_history_df.rename(columns={\n 'gradeTimestamp': 'ActivityDateTime', 'gradeChangeType':\n 'ActivityStatus', 'courseId': 'LMSSectionIdentifier',\n 'actorUserId': 'LMSUserIdentifier'})\n grade_history_df['ActivityType'] = ACTIVITY_TYPE_GRADE\n user_submission_df = user_submission_df.append(grade_history_df)\n user_submission_df.drop_duplicates(subset=['SourceSystemIdentifier'],\n inplace=True)\n user_submission_df['ActivityTimeInMinutes'] = ''\n user_submission_df['Content'] = ''\n user_submission_df['SourceSystem'] = SOURCE_SYSTEM\n user_submission_df['SourceCreateDate'] = ''\n user_submission_df['SourceLastModifiedDate'] = ''\n result: Dict[str, DataFrame] = dict(tuple(user_submission_df.groupby([\n 'LMSSectionIdentifier'])))\n return result\n",
"step-5": "# SPDX-License-Identifier: Apache-2.0\n# Licensed to the Ed-Fi Alliance under one or more agreements.\n# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.\n# See the LICENSE and NOTICES files in the project root for more information.\n\nimport json\nfrom typing import Dict\nfrom pandas import DataFrame, concat, Series\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\n\nACTIVITY_TYPE_STATE = \"Submission State Change\"\nACTIVITY_TYPE_GRADE = \"Submission Grade Change\"\n\n\ndef submissions_to_user_submission_activities_dfs(\n submissions_df: DataFrame,\n) -> Dict[str, DataFrame]:\n \"\"\"\n Convert a Submission API DataFrame to a Dict of UserActivity\n UDM DataFrames grouped by source system section id.\n\n Parameters\n ----------\n submissions_df: DataFrame\n is a Submission API DataFrame\n\n Returns\n -------\n Dict[str, DataFrame] LMS UDM UserActivity DataFrames\n grouped by source system section id\n\n Notes\n -----\n UserActivity DataFrame columns are:\n ActivityDateTime: The date/time the activity occurred\n ActivityStatus: The activity status\n ActivityTimeInMinutes: The total activity time in minutes\n ActivityType: The type of activity, here \"Submission\" or \"Grade\"\n AssignmentIdentifier: A unique numeric identifier assigned to the assignment\n Content: Content associated with the activity\n LMSSectionIdentifier: A unique numeric identifier assigned to the section\n SourceSystem: The system code or name providing the user activity data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a\n user activity by the source system\n LMSUserIdentifier: A unique numeric identifier assigned to the user\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert \"submissionHistory\" in submissions_df.columns\n assert \"id\" in submissions_df.columns\n assert \"courseId\" in submissions_df.columns\n assert \"courseWorkId\" in submissions_df.columns\n\n # convert json-like submissionHistory string to list of dicts\n submissions_df[\"submissionHistory\"] = submissions_df[\"submissionHistory\"].apply(lambda json_like: json.loads(json_like.replace(\"'\", '\"')))\n submissions_df[\"AssignmentIdentifier\"] = submissions_df[\n [\"courseId\", \"courseWorkId\"]\n ].agg(\"-\".join, axis=1)\n\n submissions_df = submissions_df[[\"id\", \"courseId\", \"courseWorkId\", \"submissionHistory\", \"AssignmentIdentifier\", \"CreateDate\", \"LastModifiedDate\"]]\n\n # explode submissionHistory lists into rows with other columns duplicated\n history_df = submissions_df.explode(column=\"submissionHistory\") # type: ignore\n\n # expand submissionHistory dicts (stateHistory and gradeHistory) into their own columns\n history_df = history_df[\"submissionHistory\"].apply(Series).merge(history_df, left_index=True, right_index=True, how='outer')\n history_df.drop(columns=[\"submissionHistory\"], inplace=True)\n\n # expand stateHistory (can assume exists, should always have at least one \"CREATED\" entry)\n user_submission_df = concat([history_df, history_df[\"stateHistory\"].apply(Series)], axis=1)\n user_submission_df.dropna(subset=[\"stateHistory\"], inplace=True)\n\n # enrich stateHistory\n user_submission_df[\"SourceSystemIdentifier\"] = \"S-\" + user_submission_df[\n [\"courseId\", \"courseWorkId\", \"id\", \"stateTimestamp\"]\n ].agg(\"-\".join, axis=1)\n\n user_submission_df = user_submission_df[\n [\n 
\"SourceSystemIdentifier\",\n \"AssignmentIdentifier\",\n \"stateTimestamp\",\n \"state\",\n \"courseId\",\n \"actorUserId\",\n \"CreateDate\",\n \"LastModifiedDate\"\n ]\n ]\n\n user_submission_df = user_submission_df.rename(\n columns={\n \"stateTimestamp\": \"ActivityDateTime\",\n \"state\": \"ActivityStatus\",\n \"courseId\": \"LMSSectionIdentifier\",\n \"actorUserId\": \"LMSUserIdentifier\",\n }\n )\n\n user_submission_df[\"ActivityType\"] = ACTIVITY_TYPE_STATE\n\n # expand gradeHistory if exists\n if \"gradeHistory\" in history_df:\n grade_history_df = concat([history_df, history_df[\"gradeHistory\"].apply(Series)], axis=1)\n grade_history_df.dropna(subset=[\"gradeHistory\"], inplace=True)\n\n # enrich gradeHistory\n grade_history_df[\"SourceSystemIdentifier\"] = \"G-\" + grade_history_df[\n [\"courseId\", \"courseWorkId\", \"id\", \"gradeTimestamp\"]\n ].agg(\"-\".join, axis=1)\n\n grade_history_df = grade_history_df[\n [\n \"SourceSystemIdentifier\",\n \"AssignmentIdentifier\",\n \"gradeTimestamp\",\n \"gradeChangeType\",\n \"courseId\",\n \"actorUserId\",\n \"CreateDate\",\n \"LastModifiedDate\"\n ]\n ]\n\n grade_history_df = grade_history_df.rename(\n columns={\n \"gradeTimestamp\": \"ActivityDateTime\",\n \"gradeChangeType\": \"ActivityStatus\",\n \"courseId\": \"LMSSectionIdentifier\",\n \"actorUserId\": \"LMSUserIdentifier\",\n }\n )\n\n grade_history_df[\"ActivityType\"] = ACTIVITY_TYPE_GRADE\n\n # combine with stateHistory\n user_submission_df = user_submission_df.append(grade_history_df)\n\n # teacher actions can show up on student histories and vice-versa\n user_submission_df.drop_duplicates(subset=[\"SourceSystemIdentifier\"], inplace=True)\n\n # finish with common columns\n user_submission_df[\"ActivityTimeInMinutes\"] = \"\"\n user_submission_df[\"Content\"] = \"\"\n user_submission_df[\"SourceSystem\"] = SOURCE_SYSTEM\n user_submission_df[\"SourceCreateDate\"] = \"\" # No create date available from API\n user_submission_df[\"SourceLastModifiedDate\"] = \"\" # No modified date available from API\n\n # group by section id as a Dict of DataFrames\n result: Dict[str, DataFrame] = dict(\n tuple(user_submission_df.groupby([\"LMSSectionIdentifier\"]))\n )\n\n return result\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
iris_nan = pd.read_csv("MLData/iris_nan.csv")
iris_nan.head()
Y = iris_nan["class"].values
X = iris_nan.drop("class", axis=1)
# Our iris dataframe presents some NaN values, and we need to fix that.
# We got some methods to apply on a pandas dataframe:
# 1: Drop records presenting a NaN value: We can achieve that with dropna, which
# will drop all the records presenting a NaN value inside.
# With dropna() executed, it will remove all the records (rows) presenting a Nan.
iris_nan.dropna()
# 2: A more intrusive method is to use dropna for each row/column that presents
# a NaN value. We can drop an entire column presenting a NaN value by using dropna
# and specifying the axis: 0 for the row, 1 for the column. In this case, it will
# then drop the petal_lenght column.
iris_nan.dropna(axis=1)
# 3: A better method is to REPLACE NaN values with another one, which usually matches
# the Mean, Median or the Mode. Let's see all of them:
# MEAN - We calculate the mean of the iris_nan dataframe, and the use the method
# fillna passing the mean to fill the NaN value with the average. Note that,
# using mean on the entire dataframe will return a Series (dataframe) containing,
# for all the labels the mean of all their values. We then use this series with
# fillna() that will fill each NaN with the appropriate value based on the label
# they appear to be NaN in.
mean_replace = iris_nan.mean()
iris_nan.fillna(mean_replace)
# MEDIAN - The median is the "middle" value of a specific range of values.
# The median() function works exactly like mean(), it will return a series that
# will be used by fillna() to replace the missing NaN values.
median_replace = iris_nan.median()
iris_nan.fillna(median_replace)
# MODE - The mode is just the element that appears the most into a set of elements.
# For example, given the array 3,7,9,13,18,18,24 its mode would be 18 because it's
# the element that appears the most. With each value being unique, there will be
# no mode. the function mode() will return an entire dataframe composed by, the
# first row as the mode (if present) and the others as NaN. We then need to access
# just the first row of this dataframe, and we can do that by using ILOC (that
# works by indexing) using 0 as argument to indicate the first row. We then use
# fillna to replace the values.
mode_replace = iris_nan.mode().iloc[0]
iris_nan.fillna(mode_replace)
# For the numpy array we use another simple method: The Imputer. An imputer is just
# a tool to fill missing values inside of a numpy array. We need to import it as
# follow: From sklearn 0.22 onward we need to import SimpleImputer since imputer
# has been deprecated.
from sklearn.impute import SimpleImputer
import numpy as np
# we then create an imputer object: We need to specify two things:
# 1) Strategy: could be mean, median or most_frequent (the mode). Works exactly like
# the previous examples.
# 2) Missing values: we need to pass the nan type, specified by np.nan.
imputer = SimpleImputer(strategy="mean", missing_values=np.nan)
# We then use fit_transform: as we already know, fit_transform is a combination of
# both functions fit and transform. It first calculates the mean/median/mode with
# the function FIT and then TRANSFORM replaces all the np.nan
# values in the argument passed (could be a dataframe), returning a numpy array
# with all the nan filled.
X_imputed = imputer.fit_transform(X)
X_imputed
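
# --- Added illustration (not in the original script): a minimal, self-contained SimpleImputer
# demo on a tiny made-up array, so the fit/transform behaviour can be seen without iris_nan.csv.
demo = np.array([[1.0, 2.0], [np.nan, 4.0], [5.0, np.nan]])
print(SimpleImputer(strategy="mean", missing_values=np.nan).fit_transform(demo))
# The NaN in column 0 becomes 3.0 (mean of 1 and 5); the NaN in column 1 becomes 3.0 (mean of 2 and 4).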
|
normal
|
{
"blob_id": "00429a16ac009f6f706ef11bc29b0aec77b9ebe6",
"index": 9536,
"step-1": "<mask token>\n",
"step-2": "<mask token>\niris_nan.head()\n<mask token>\niris_nan.dropna()\niris_nan.dropna(axis=1)\n<mask token>\niris_nan.fillna(mean_replace)\n<mask token>\niris_nan.fillna(median_replace)\n<mask token>\niris_nan.fillna(mode_replace)\n<mask token>\nX_imputed\n",
"step-3": "<mask token>\niris_nan = pd.read_csv('MLData/iris_nan.csv')\niris_nan.head()\nY = iris_nan['class'].values\nX = iris_nan.drop('class', axis=1)\niris_nan.dropna()\niris_nan.dropna(axis=1)\nmean_replace = iris_nan.mean()\niris_nan.fillna(mean_replace)\nmedian_replace = iris_nan.median()\niris_nan.fillna(median_replace)\nmode_replace = iris_nan.mode().iloc[0]\niris_nan.fillna(mode_replace)\n<mask token>\nimputer = SimpleImputer(strategy='mean', missing_values=np.nan)\nX_imputed = imputer.fit_transform(X)\nX_imputed\n",
"step-4": "import pandas as pd\niris_nan = pd.read_csv('MLData/iris_nan.csv')\niris_nan.head()\nY = iris_nan['class'].values\nX = iris_nan.drop('class', axis=1)\niris_nan.dropna()\niris_nan.dropna(axis=1)\nmean_replace = iris_nan.mean()\niris_nan.fillna(mean_replace)\nmedian_replace = iris_nan.median()\niris_nan.fillna(median_replace)\nmode_replace = iris_nan.mode().iloc[0]\niris_nan.fillna(mode_replace)\nfrom sklearn.impute import SimpleImputer\nimport numpy as np\nimputer = SimpleImputer(strategy='mean', missing_values=np.nan)\nX_imputed = imputer.fit_transform(X)\nX_imputed\n",
"step-5": "import pandas as pd\n\niris_nan = pd.read_csv(\"MLData/iris_nan.csv\")\niris_nan.head()\n\nY = iris_nan[\"class\"].values\nX = iris_nan.drop(\"class\", axis=1)\n\n# Our iris dataframe presents some NaN values, and we need to fix that.\n# We got some methods to apply on a pandas dataframe:\n\n# 1: Drop records presenting a NaN value: We can achieve that with dropna, which\n# will drop all the records presenting a NaN value inside.\n# With dropna() executed, it will remove all the records (rows) presenting a Nan.\niris_nan.dropna()\n\n# 2: A more intrusive method is to use dropna for each row/column that present\n# a NaN value. We can drop an entire column presenting a NaN value by using dropna\n# and specifying the axix: 0 for the row, 1 for the column. In this case, it will\n# then drop the petal_lenght column.\niris_nan.dropna(axis=1)\n\n# 3: A better method is to REPLACE NaN value with another one, that usually match\n# Mean, Median or the Mode. Let's see all of them:\n\n# MEAN - We calculate the mean of the iris_nan dataframe, and the use the method\n# fillna passing the mean to fill the NaN value with the average. Note that,\n# using mean on the entire dataframe will return a Series (dataframe) containing,\n# for all the labels the mean of all their values. We then use this series with\n# fillna() that will fill each NaN with the appropriate value based on the label\n# they appear to be NaN in.\nmean_replace = iris_nan.mean()\niris_nan.fillna(mean_replace)\n\n# MEDIAN - The median is the \"middle\" value of a specific range of values.\n# The median() function works exactly like mean(), it will return a series that\n# will be used by fillna() to replace the missing NaN values.\nmedian_replace = iris_nan.median()\niris_nan.fillna(median_replace)\n\n# MODE - The mode is just the element that appears the most into a set of elements.\n# For example, given the array 3,7,9,13,18,18,24 his mode would be 18 cause it's\n# the element that appears the most. With each value being unique, there will be\n# no mode. the function mode() will return an entire dataframe composed by, the\n# first row as the mode (if present) and the others as NaN. We then need to access\n# just the first row of this dataframe, and we can do that by using ILOC (that\n# works by indexing) using 0 as argument to indicate the first row. We then use\n# fillna to replace the values.\nmode_replace = iris_nan.mode().iloc[0]\niris_nan.fillna(mode_replace)\n\n# For the numpy array we use another simple method: The Imputer. An imputer is just\n# a tool to fill missing values inside of a numpy array. We need to import it as\n# follow: From sklearn 0.22 onward we need to import SimpleImputer since imputer\n# has been deprecated.\nfrom sklearn.impute import SimpleImputer\nimport numpy as np\n\n# we then create an imputer object: We need to specify two things:\n# 1) Strategy: could be mean, median or mode. Works exactly like the previous\n# examples.\n# 2) Missing values: we need to pass the nan type, specifiied by np.nan.\nimputer = SimpleImputer(strategy=\"mean\", missing_values=np.nan)\n\n# We then use fit_transform: as we already know, fit_transform is a combination by\n# both function fit and transform. It initially calculate the mean/median/mode with\n# the function FIT (X' = X - Mean / STD) and then will TRANSFORM all the np.NaN\n# values into the argument passed (could be a dataframe) returning a numpy array\n# with all the nan filled. \nX_imputed = imputer.fit_transform(X)\nX_imputed\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Defines all Rady URL."""
from django.conf.urls import url, include
from django.contrib import admin
apiv1_urls = [
url(r"^users/", include("user.urls")),
url(r"^meetings/", include("meeting.urls")),
url(r"^docs/", include("rest_framework_docs.urls")),
url(r"^auth/", include("auth.urls")),
url(r"^fcm/devices/", include("device.urls")),
url(r"^statistics/", include("stats.urls")),
url(r"^admin/", include("admin.urls")),
]
urlpatterns = [
url(r"^api/v1/", include(apiv1_urls)),
url(r"^admin/", admin.site.urls),
]
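
# --- Added illustration (not part of the project): a hypothetical shape for one of the
# included modules, e.g. user/urls.py, to show how the includes above compose; the view
# name "UserListView" is an assumption for illustration only.
#
# from django.conf.urls import url
# from user.views import UserListView
#
# urlpatterns = [
#     url(r"^$", UserListView.as_view(), name="user-list"),  # served at /api/v1/users/
# ]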
|
normal
|
{
"blob_id": "aa00e4569aeae58e3f0ea1a8326e35c0776f7727",
"index": 4849,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napiv1_urls = [url('^users/', include('user.urls')), url('^meetings/',\n include('meeting.urls')), url('^docs/', include(\n 'rest_framework_docs.urls')), url('^auth/', include('auth.urls')), url(\n '^fcm/devices/', include('device.urls')), url('^statistics/', include(\n 'stats.urls')), url('^admin/', include('admin.urls'))]\nurlpatterns = [url('^api/v1/', include(apiv1_urls)), url('^admin/', admin.\n site.urls)]\n",
"step-3": "<mask token>\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\napiv1_urls = [url('^users/', include('user.urls')), url('^meetings/',\n include('meeting.urls')), url('^docs/', include(\n 'rest_framework_docs.urls')), url('^auth/', include('auth.urls')), url(\n '^fcm/devices/', include('device.urls')), url('^statistics/', include(\n 'stats.urls')), url('^admin/', include('admin.urls'))]\nurlpatterns = [url('^api/v1/', include(apiv1_urls)), url('^admin/', admin.\n site.urls)]\n",
"step-4": "\"\"\"Defines all Rady URL.\"\"\"\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\n\napiv1_urls = [\n url(r\"^users/\", include(\"user.urls\")),\n url(r\"^meetings/\", include(\"meeting.urls\")),\n url(r\"^docs/\", include(\"rest_framework_docs.urls\")),\n url(r\"^auth/\", include(\"auth.urls\")),\n url(r\"^fcm/devices/\", include(\"device.urls\")),\n url(r\"^statistics/\", include(\"stats.urls\")),\n url(r\"^admin/\", include(\"admin.urls\")),\n]\n\nurlpatterns = [\n url(r\"^api/v1/\", include(apiv1_urls)),\n url(r\"^admin/\", admin.site.urls),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from random import random
def random_numbers():
print('start generator')
while True:
val = random()
print(f'will yield {val}')
yield val
def run_random_numbers():
print(f'{random_numbers=}')
rnd_gen = random_numbers()
print(f'{rnd_gen=}')
print(f'{next(rnd_gen)=}')
print(f'{next(rnd_gen)=}')
# but we can have two way communication
print(f'{rnd_gen.send(None)=}')
print(f'{rnd_gen.send(42)=}')
# rnd_gen.throw(Exception)
# rnd_gen.close()
# next(rnd_gen)
def inout_gen():
print('init')
ret_val = None
while True:
x = yield ret_val
if x is not None:
ret_val = x
def run_input_gen():
inout_g = inout_gen()
next(inout_g)
print(f'{next(inout_g)}')
print(f'{inout_g.send(22)}')
print(f'{next(inout_g)}')
def exercise_gen(ret_val, times):
"""Return `ret_value` `times` times.
    If the generator receives some value from outside, update `ret_value`"""
def exercise1():
"""Make it pass"""
g1 = exercise_gen(42, 3)
assert next(g1) == 42
assert g1.send('new val') == 'new val'
assert next(g1) == 'new val'
try:
next(g1)
except StopIteration:
# ok
pass
else:
raise Exception('Generator should be invalid')
def exercise2():
"""Update `exercise_gen`, so it will ignore all exceptions"""
g1 = exercise_gen("I'll ignore errors", 300)
assert next(g1) == "I'll ignore errors"
assert g1.send('new val') == 'new val'
assert g1.throw(Exception) == 'new val'
assert next(g1) == 'new val'
if __name__ == '__main__':
run_random_numbers()
run_input_gen()
exercise1()
exercise2()
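
# --- Added reference sketch (not part of the exercise file): one possible body for
# exercise_gen that would satisfy exercise1 and exercise2, kept under a separate,
# hypothetical name so the exercise above stays open.
def _exercise_gen_solution(ret_val, times):
    for _ in range(times):
        try:
            received = yield ret_val
        except Exception:
            received = None  # ignore exceptions thrown into the generator (exercise2)
        if received is not None:
            ret_val = received  # a value sent from outside replaces the yielded value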
|
normal
|
{
"blob_id": "e5979aeb7cff0e2a75966924382bae87aebcfcb2",
"index": 3312,
"step-1": "<mask token>\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\n<mask token>\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\n<mask token>\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\n<mask token>\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\n<mask token>\n",
"step-4": "from random import random\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\ndef run_random_numbers():\n print(f'random_numbers={random_numbers!r}')\n rnd_gen = random_numbers()\n print(f'rnd_gen={rnd_gen!r}')\n print(f'next(rnd_gen)={next(rnd_gen)!r}')\n print(f'next(rnd_gen)={next(rnd_gen)!r}')\n print(f'rnd_gen.send(None)={rnd_gen.send(None)!r}')\n print(f'rnd_gen.send(42)={rnd_gen.send(42)!r}')\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\nif __name__ == '__main__':\n run_random_numbers()\n run_input_gen()\n exercise1()\n exercise2()\n",
"step-5": "from random import random\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\ndef run_random_numbers():\n print(f'{random_numbers=}')\n rnd_gen = random_numbers()\n print(f'{rnd_gen=}')\n print(f'{next(rnd_gen)=}')\n print(f'{next(rnd_gen)=}')\n\n # but we can have two way communication\n print(f'{rnd_gen.send(None)=}')\n print(f'{rnd_gen.send(42)=}')\n # rnd_gen.throw(Exception)\n # rnd_gen.close()\n # next(rnd_gen)\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n # ok\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\nif __name__ == '__main__':\n run_random_numbers()\n run_input_gen()\n exercise1()\n exercise2()\n",
"step-ids": [
2,
4,
6,
9,
10
]
}
|
[
2,
4,
6,
9,
10
] |
import datetime # to add timestamps on every block in blockchain
import hashlib # library that is used to hash the block
import json # to communicate in json data
# Flask to implement webservices, jsonify to see the json message/response
# request helps us to connect all the nodes of the blockchain together, forming the p2p network
from flask import Flask, jsonify, request
# it will help us to verify whether all the nodes have the same blockchain or not, via http requests (used in replace_chain)
import requests
from uuid import uuid4
from urllib.parse import urlparse
# Building a Blockchain
class Blockchain:
def __init__(self):
self.chain = [] # our main block chain
        # now we will create the list of transactions which will record all the transactions
        self.transactions = []
        # create_block is used to create a block in the blockchain, so it is executed only when the block is mined (meaning it has the winning proof_of_work=proof); proof=0 and previous_hash='0' for the genesis block
        self.create_block(proof=0, previous_hash='0')
        # nodes will contain the unique identifier of the address of all nodes in the p2p network
        self.nodes = set() # we have taken set() instead of list because we know that addresses are randomly generated by uuid4, to avoid duplication in it
# part1
def create_block(self, proof, previous_hash):
block = { # dictionary of python data structure
'index': len(self.chain)+1,
'timestamp': str(datetime.datetime.now()),
            'proof': proof, # works like the nonce of a block; the search stops when we reach at or below the target
'previous_hash': previous_hash,
'transactions': self.transactions}
        self.transactions = [] # this needs to be done because we can't have duplicate lists of transactions in the further blocks, so empty the transactions that have been added to the block
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(
str(new_proof**2-previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
        return new_proof # it is just a number corresponding to the puzzle solved: the winning proof gives a hash with four leading zeros
    # the hash of a block is created after generating the block; that's why we only use previous_hash, because it's already created
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
        # reference to the first block (the genesis block)
previous_block = chain[0]
block_index = 1 # required for iteration
while block_index < len(chain):
            block = chain[block_index] # current block
            # checking whether the reference stored in the property previous_hash matches the hash of the previous block, using the hash function
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
            # verifying the proof of the block with the data proof and previous proof; it is easier than creating the proof
hash_operation = hashlib.sha256(
str(proof**2 - previous_proof**2).encode()).hexdigest()
            # the more zeros, the harder it is to mine the block
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
    # function used to add the transactions to the list
def add_transaction(self, senders, receiver, amount):
self.transactions.append({
'senders': senders,
'receiver': receiver,
'amount': amount
})
previous_block = self.get_previous_block()
        # +1 because the transactions are added before mining, so the new block's index will be one more than the previous
return previous_block['index']+1
# part-1 ends
    # part-3--> dealing with the decentralized application and transactions
    # this function allows us to add different nodes to the chain
    def add_node(self, address): # generating the decentralized application
# we need to parse the url before adding it
parsed_url = urlparse(address)
# .netloc gives us the unique identifier of the node address removing the unrequired part from it
self.nodes.add(parsed_url.netloc)
# this function help us to solve the problem of consensus protocols (competing chain)
def replace_chain(self):
        # this variable helps us to find the length of the longest chain in the network
max_length = len(self.chain)
longest_chain = None
        network = self.nodes # this variable will hold the addresses of all the nodes in the network
        for node in network:
            # we know the nodes set will hold only the netloc value of each node, so we are going to use that and make a request to that node to check its length
            # using the requests library we make a request to that node's address (f'http://{node}/get_chain' --> f'http://127.0.0.1:5000/get_chain')
response = requests.get(f'http://{node}/get_chain')
            if response.status_code == 200: # this is the code to check that something was received in the request
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
# this will happen in every node of network
if longest_chain:
                # if this chain is shorter than another, it will be updated
self.chain = longest_chain
return True
        # if this chain is already the longest in the network, then return False and do not update
return False
# part-3 ends
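
# --- Added illustration (not part of the original node code): a standalone sketch of the
# mining puzzle solved by proof_of_work above; the previous_proof default of 0 is arbitrary
# and only for demonstration.
def _demo_proof_search(previous_proof: int = 0) -> int:
    candidate = 1
    while hashlib.sha256(str(candidate ** 2 - previous_proof ** 2).encode()).hexdigest()[:4] != '0000':
        candidate += 1
    return candidate  # first value whose hash_operation starts with '0000'
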
# Mining our Blockchain
app = Flask(__name__)
# Creating a Blockchain
# creating the instance of blockchain
blockchain = Blockchain()
# Mining the blockchain
# create a random and unique address for this node
# this is the address used by the node to send the whale coin when the miner mines the whale coin
node_address = str(uuid4()).replace('-', '')
# part-2
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
    # miner's reward
    # usually the receiver public address is created when the user generates the wallet, and the mining pool sends the coin after mining the block to the miner address present in the bat file, which is edited after downloading the software
blockchain.add_transaction(node_address, 'Bhavjot', 1)
    # when create_block is called, all the transactions performed will be inserted inside the currently created block, and then the transactions list is changed back to [] (empty) to avoid duplication
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', # response is a json data
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'transactions': block['transactions']}
return jsonify(response), 200
# getting all blocks in chain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain)
}
return jsonify(response), 200
# custom message
@app.route('/', methods=['GET'])
def custom_message():
response = {
'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'
}
return jsonify(response), 200
# part-2 ends
# creating the transactions
@app.route('/add_transactions', methods=['POST'])
def add_transaction():
    # this will help us to extract the post request made in postman, like req.params.name in express
    json = request.get_json()
    # this will help us to check whether all the parameters are present or not for adding the transactions
transaction_keys = ['sender', 'receiver', 'amount']
if not all(key in json for key in transaction_keys):
return 'Some elements of the transaction are missing', 400
index = blockchain.add_transaction(
json['sender'], json['receiver'], json['amount'])
    # when the block is mined, all the transactions in the list are added to the block
response = {'message': f'This transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/connect_node', methods=['POST'])
def connect_node():
    json = request.get_json() # we will get the request message sent from postman
    # {'nodes':['http://127.0.0.1:5000','http://127.0.0.1:5001','http://127.0.0.1:5003',...]} when adding nodes using add_node, 127.0.0.1:5001 will be extracted using netloc
nodes = json.get('nodes')
if nodes is None:
return "No node", 400
for node in nodes:
blockchain.add_node(node) # add our nodes to network
response = {'message': 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:',
'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
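# example request for registering nodes (addresses are assumptions; list every node of the network):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"nodes": ["http://127.0.0.1:5000", "http://127.0.0.1:5002"]}' \
#        http://127.0.0.1:5001/connect_node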
# Replacing the chain by the longest chain if needed
# this function is present in every node of the blockchain and is called regularly (by hitting the replace_chain URL) so that the node stays updated with the other nodes' chains
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
# using the above defined function in class
is_chain_replaced = blockchain.replace_chain()
    if is_chain_replaced:  # the current chain was not the longest one, so it was replaced
response = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',
'new_chain': blockchain.chain}
    else:  # the current chain was already the longest one, so it is not replaced
response = {'message': 'All good. The chain is the largest one.',
'actual_chain': blockchain.chain}
return jsonify(response), 200
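# consensus in practice (assumed two-node setup): if the node on port 5001 has a shorter chain
# than the node on port 5000, calling
#   curl http://127.0.0.1:5001/replace_chain
# makes it fetch /get_chain from every registered node and adopt the longest valid chain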
# Running the app
# host='0.0.0.0' specifies that the app is publicly available on the network
app.run(host='0.0.0.0', port=5001)
|
normal
|
{
"blob_id": "e85d3660968410b83b14ba610150c0c8cc880119",
"index": 9191,
"step-1": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(proof=0, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n <mask token>\n <mask token>\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n <mask token>\n\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({'senders': senders, 'receiver': receiver,\n 'amount': amount})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n <mask token>\n\n def replace_chain(self):\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(proof=0, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({'senders': senders, 'receiver': receiver,\n 'amount': amount})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\[email protected]('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n blockchain.add_transaction(node_address, 'Bhavjot', 1)\n block = blockchain.create_block(proof, previous_hash)\n response = {'message':\n 'Congratulations, you just mined a block! 
😈😈😈😈😈🤓🤓🤓', 'index': block\n ['index'], 'timestamp': block['timestamp'], 'proof': block['proof'],\n 'previous_hash': block['previous_hash'], 'transactions': block[\n 'transactions']}\n return jsonify(response), 200\n\n\[email protected]('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\n<mask token>\n\n\[email protected]('/add_transactions', methods=['POST'])\ndef add_transaction():\n json = request.get_json()\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Some elements of the transaction are missing', 400\n index = blockchain.add_transaction(json['sender'], json['receiver'],\n json['amount'])\n response = {'message': f'This transaction will be added to Block {index}'}\n return jsonify(response), 201\n\n\[email protected]('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\[email protected]('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(proof=0, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({'senders': senders, 'receiver': receiver,\n 'amount': amount})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\n<mask token>\n\n\[email protected]('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n blockchain.add_transaction(node_address, 'Bhavjot', 1)\n block = blockchain.create_block(proof, previous_hash)\n response = {'message':\n 'Congratulations, you just mined a block! 
😈😈😈😈😈🤓🤓🤓', 'index': block\n ['index'], 'timestamp': block['timestamp'], 'proof': block['proof'],\n 'previous_hash': block['previous_hash'], 'transactions': block[\n 'transactions']}\n return jsonify(response), 200\n\n\[email protected]('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\[email protected]('/', methods=['GET'])\ndef custom_message():\n response = {'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'}\n return jsonify(response), 200\n\n\[email protected]('/add_transactions', methods=['POST'])\ndef add_transaction():\n json = request.get_json()\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Some elements of the transaction are missing', 400\n index = blockchain.add_transaction(json['sender'], json['receiver'],\n json['amount'])\n response = {'message': f'This transaction will be added to Block {index}'}\n return jsonify(response), 201\n\n\[email protected]('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\[email protected]('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\napp.run(host='0.0.0.0', port=5001)\n",
"step-4": "import datetime\nimport hashlib\nimport json\nfrom flask import Flask, jsonify, request\nimport requests\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.transactions = []\n self.create_block(proof=0, previous_hash='0')\n self.nodes = set()\n\n def create_block(self, proof, previous_hash):\n block = {'index': len(self.chain) + 1, 'timestamp': str(datetime.\n datetime.now()), 'proof': proof, 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = []\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(str(new_proof ** 2 - \n previous_proof ** 2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(str(proof ** 2 - previous_proof **\n 2).encode()).hexdigest()\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({'senders': senders, 'receiver': receiver,\n 'amount': amount})\n previous_block = self.get_previous_block()\n return previous_block['index'] + 1\n\n def add_node(self, address):\n parsed_url = urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes\n for node in network:\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n if longest_chain:\n self.chain = longest_chain\n return True\n return False\n\n\napp = Flask(__name__)\nblockchain = Blockchain()\nnode_address = str(uuid4()).replace('-', '')\n\n\[email protected]('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n blockchain.add_transaction(node_address, 'Bhavjot', 1)\n block = blockchain.create_block(proof, previous_hash)\n response = {'message':\n 'Congratulations, you just mined a block! 
😈😈😈😈😈🤓🤓🤓', 'index': block\n ['index'], 'timestamp': block['timestamp'], 'proof': block['proof'],\n 'previous_hash': block['previous_hash'], 'transactions': block[\n 'transactions']}\n return jsonify(response), 200\n\n\[email protected]('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n\[email protected]('/', methods=['GET'])\ndef custom_message():\n response = {'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'}\n return jsonify(response), 200\n\n\[email protected]('/add_transactions', methods=['POST'])\ndef add_transaction():\n json = request.get_json()\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Some elements of the transaction are missing', 400\n index = blockchain.add_transaction(json['sender'], json['receiver'],\n json['amount'])\n response = {'message': f'This transaction will be added to Block {index}'}\n return jsonify(response), 201\n\n\[email protected]('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'No node', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {'message':\n 'All the nodes are now connected. The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:'\n , 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\[email protected]('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message':\n 'The nodes had different chains so the chain was replaced by the longest one.'\n , 'new_chain': blockchain.chain}\n else:\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\napp.run(host='0.0.0.0', port=5001)\n",
"step-5": "import datetime # to add timestamps on every block in blockchain\nimport hashlib # library that is ued to hash the block\nimport json # to communicate in json data\n# Flask to implement webservices jsonify to see the jsop message/response\n# request help us to connect all the nodes of the blockchain together froming the p2p network\nfrom flask import Flask, jsonify, request\n# it will help us to verify that all the blockchain have same blockhain or not http requests (used in replace_cahin)\nimport requests\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\n\n# Building a Blockchain\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = [] # our main block chain\n # now we will create the list of transation which will record the all transactions\n self.transactions = []\n # create_block used to create the block in blockchain so it is executed only when the block is mined(meaning it has winnnig proof_of_work=proof) proof=0 and previous_hash='0' for the genesis block\n self.create_block(proof=0, previous_hash='0')\n # nodes will contains the unique identifier of the address of all nodes in p2p network\n self.nodes = set() # we have taken set() instead of list because we know that address are randomly generated by uuid4 to avoid duplicacy in it\n # part1\n\n def create_block(self, proof, previous_hash):\n block = { # dictionary of python data structure\n 'index': len(self.chain)+1,\n 'timestamp': str(datetime.datetime.now()),\n 'proof': proof, # works like a nounce of block stops when we reach at or below the target\n 'previous_hash': previous_hash,\n 'transactions': self.transactions}\n self.transactions = [] # this need to be done bcoz we cant have duplicates lists of transactions in the further blocks so empty the transation that had been added in the block\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, previous_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(\n str(new_proof**2-previous_proof**2).encode()).hexdigest()\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n return new_proof # it is just a no. 
corresponding to the game solved by person is having a hash with trailing 4 zeroe's\n\n # hash of a block is created after generating block thats we have only use previous_hash because its already created\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n # reference of first block stored genesis block\n previous_block = chain[0]\n block_index = 1 # required for iteration\n while block_index < len(chain):\n block = chain[block_index] # cuurent block\n # checking weather the refernce stored in property previus_hash is currently matched or not with the hash of previous block using hash function\n if block['previous_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n # verfying the proof of block with the data proof and previous proof it is easy then creating the proof\n hash_operation = hashlib.sha256(\n str(proof**2 - previous_proof**2).encode()).hexdigest()\n # the more is zero's the more is harder to mine the block\n if hash_operation[:4] != '0000':\n return False\n previous_block = block\n block_index += 1\n return True\n\n # functions used to get add the transactions to the lists\n def add_transaction(self, senders, receiver, amount):\n self.transactions.append({\n 'senders': senders,\n 'receiver': receiver,\n 'amount': amount\n })\n previous_block = self.get_previous_block()\n # +1 beacause before mining the transaction are added so new_block index will be +1 then previous\n return previous_block['index']+1\n # part-1 ends\n\n # part-3--> dealing with decentarlized application and transactions\n\n # this function allow us to add different nodes to chain\n\n def add_node(self, address): # generating the decentarlized application\n # we need to parse the url before adding it\n parsed_url = urlparse(address)\n # .netloc gives us the unique identifier of the node address removing the unrequired part from it\n self.nodes.add(parsed_url.netloc)\n\n # this function help us to solve the problem of consensus protocols (competing chain)\n\n def replace_chain(self):\n # this variable help us to find the length of longest chain among different network\n max_length = len(self.chain)\n longest_chain = None\n network = self.nodes # this variable will hold the address of all the nodes in network\n for node in network:\n # we know the nodes array will hold only the netlock value in nodes so we are going to use taht and make a request to that node check its length\n # using the requests library we make a requests to that node address ([f'http://{node}/get_chain'] --> [f'http://127.0.0.5000/get_chain')]\n response = requests.get(f'http://{node}/get_chain')\n if response.status_code == 200: # this ids the vode chaeck something is received in request\n length = response.json()['length']\n chain = response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length = length\n longest_chain = chain\n # this will happen in every node of network\n if longest_chain:\n # if this chain is shorter than otherit will be updated\n self.chain = longest_chain\n return True\n # if this chain is only longest in network than return false and no update\n return False\n # part-3 ends\n# Mining our Blockchain\n\n\napp = Flask(__name__)\n\n# Creating a Blockchain\n# creating the instance of blockchain\nblockchain = Blockchain()\n\n# Mining the blockchain\n# create an random and unique address for the node on 
port 5000\n# this is the address used by to send the whale coin when the miner mines the wahle coin\nnode_address = str(uuid4()).replace('-', '')\n\n# part-2\n\n\[email protected]('/mine_block', methods=['GET'])\ndef mine_block():\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n proof = blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n # miners price\n # usually the reciever public address is created when user generate the wallet and mining pool send the coin after mining the block to miner address present in the bat file which is edited after downloading the software\n blockchain.add_transaction(node_address, 'Bhavjot', 1)\n # when created blockchain is called all the transactions performed will be inserted inside the current created block and when appended in transactions it will be again change to [] empty to avoid the duplicacy\n block = blockchain.create_block(proof, previous_hash)\n response = {'message': 'Congratulations, you just mined a block! 😈😈😈😈😈🤓🤓🤓', # response is a json data\n 'index': block['index'],\n 'timestamp': block['timestamp'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash'],\n 'transactions': block['transactions']}\n return jsonify(response), 200\n\n# getting all blocks in chain\n\n\[email protected]('/get_chain', methods=['GET'])\ndef get_chain():\n response = {\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)\n }\n return jsonify(response), 200\n\n# custom message\n\n\[email protected]('/', methods=['GET'])\ndef custom_message():\n response = {\n 'message': 'Congratulations you are on Whalecoin 🐳🐳🐳🐳🐳🐳'\n }\n return jsonify(response), 200\n\n# part-2 ends\n# creating the transactions\n\n\[email protected]('/add_transactions', methods=['POST'])\ndef add_transaction():\n # this will help us to extract te post request made in postman like req.params.name in express\n json = request.get_json()\n # this will hep us to check that all the parameters are present or not for adding the transactions\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Some elements of the transaction are missing', 400\n index = blockchain.add_transaction(\n json['sender'], json['receiver'], json['amount'])\n # when the block is mined all the transations in lists is added to block\n response = {'message': f'This transaction will be added to Block {index}'}\n return jsonify(response), 201\n\n\[email protected]('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json() # we will get request message send from postman\n # {'nodes':['http://127.0.0.1:5000','http://127.0.0.1:5001','http://127.0.0.1:5003',...]} when adding nodes using add_nodes 127.0.0.1:5001 it will be extracted using netloc\n nodes = json.get('nodes')\n if nodes is None:\n return \"No node\", 400\n for node in nodes:\n blockchain.add_node(node) # add our nodes to network\n response = {'message': 'All the nodes are now connected. 
The Whalecoin 🐳🐳🐳🐳🐳🐳 Blockchain now contains the following nodes:',\n 'total_nodes': list(blockchain.nodes)}\n return jsonify(response), 201\n\n\n# Replacing the chain by the longest chain if needed\n# this function will present in every node of blockchain and always checked so that the node remain upadatesd with other blockchains by hitiing replace_chain URL\n@ app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n # using the above defined function in class\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced: # means the current blockchain was the shortest one and it is replaced\n response = {'message': 'The nodes had different chains so the chain was replaced by the longest one.',\n 'new_chain': blockchain.chain}\n else: # means the current blockchain was not the shortest one and it is not replaced\n response = {'message': 'All good. The chain is the largest one.',\n 'actual_chain': blockchain.chain}\n return jsonify(response), 200\n\n\n# Running the app\n# host= '0.0.0.0' specifies that it is available publicily\napp.run(host='0.0.0.0', port=5001)\n",
"step-ids": [
6,
15,
17,
19,
20
]
}
|
[
6,
15,
17,
19,
20
] |
from enum import Enum
import os
from pathlib import Path
from typing import Optional
from loguru import logger
import pandas as pd
from pydantic.class_validators import root_validator, validator
from tqdm import tqdm
from zamba.data.video import VideoLoaderConfig
from zamba.models.config import (
ZambaBaseModel,
check_files_exist_and_load,
get_filepaths,
validate_model_cache_dir,
)
from zamba.models.densepose.densepose_manager import MODELS, DensePoseManager
from zamba.models.utils import RegionEnum
class DensePoseOutputEnum(Enum):
segmentation = "segmentation"
chimp_anatomy = "chimp_anatomy"
class DensePoseConfig(ZambaBaseModel):
"""Configuration for running dense pose on videos.
Args:
video_loader_config (VideoLoaderConfig): Configuration for loading videos
output_type (str): one of DensePoseOutputEnum (currently "segmentation" or "chimp_anatomy").
render_output (bool): Whether to save a version of the video with the output overlaid on top.
Defaults to False.
embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the
DensePose result. Setting to True can result in large json files. Defaults to False.
data_dir (Path): Where to find the files listed in filepaths (or where to look if
filepaths is not provided).
filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.
save_dir (Path, optional): Directory for where to save the output files;
defaults to os.getcwd().
cache_dir (Path, optional): Path for downloading and saving model weights. Defaults
to env var `MODEL_CACHE_DIR` or the OS app cache dir.
weight_download_region (RegionEnum, optional): region where to download weights; should
be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.
"""
video_loader_config: VideoLoaderConfig
output_type: DensePoseOutputEnum
render_output: bool = False
embeddings_in_json: bool = False
data_dir: Path
filepaths: Optional[Path] = None
save_dir: Optional[Path] = None
cache_dir: Optional[Path] = None
weight_download_region: RegionEnum = RegionEnum("us")
_validate_cache_dir = validator("cache_dir", allow_reuse=True, always=True)(
validate_model_cache_dir
)
def run_model(self):
"""Use this configuration to execute DensePose via the DensePoseManager"""
if not isinstance(self.output_type, DensePoseOutputEnum):
self.output_type = DensePoseOutputEnum(self.output_type)
if self.output_type == DensePoseOutputEnum.segmentation.value:
model = MODELS["animals"]
elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:
model = MODELS["chimps"]
else:
raise Exception(f"invalid {self.output_type}")
output_dir = Path(os.getcwd()) if self.save_dir is None else self.save_dir
dpm = DensePoseManager(
model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region
)
for fp in tqdm(self.filepaths.filepath, desc="Videos"):
fp = Path(fp)
vid_arr, labels = dpm.predict_video(fp, video_loader_config=self.video_loader_config)
# serialize the labels generated by densepose to json
output_path = output_dir / f"{fp.stem}_denspose_labels.json"
dpm.serialize_video_output(
labels, filename=output_path, write_embeddings=self.embeddings_in_json
)
# re-render the video with the densepose labels visualized on top of the video
if self.render_output:
output_path = output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}"
visualized_video = dpm.visualize_video(
vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps
)
# write out the anatomy present in each frame to a csv for later analysis
if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:
output_path = output_dir / f"{fp.stem}_denspose_anatomy.csv"
dpm.anatomize_video(
visualized_video,
labels,
output_path=output_path,
fps=self.video_loader_config.fps,
)
_get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(
get_filepaths
)
@root_validator(skip_on_failure=True)
def validate_files(cls, values):
# if globbing from data directory, already have valid dataframe
if isinstance(values["filepaths"], pd.DataFrame):
files_df = values["filepaths"]
else:
# make into dataframe even if only one column for clearer indexing
files_df = pd.DataFrame(pd.read_csv(values["filepaths"]))
if "filepath" not in files_df.columns:
raise ValueError(f"{values['filepaths']} must contain a `filepath` column.")
# can only contain one row per filepath
duplicated = files_df.filepath.duplicated()
if duplicated.sum() > 0:
logger.warning(
f"Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video."
)
files_df = files_df[["filepath"]].drop_duplicates()
values["filepaths"] = check_files_exist_and_load(
df=files_df,
data_dir=values["data_dir"],
skip_load_validation=True,
)
return values
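# A minimal usage sketch (the paths and parameter values below are assumptions, not part of this module):
#
#   config = DensePoseConfig(
#       video_loader_config=VideoLoaderConfig(fps=1),
#       output_type="chimp_anatomy",
#       render_output=True,
#       data_dir="videos/",
#       save_dir="densepose_output/",
#   )
#   config.run_model()
#
# run_model then writes one *_denspose_labels.json (and optionally a rendered video and an
# anatomy CSV) per input video into save_dir.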
|
normal
|
{
"blob_id": "9d8d8e97f7d3dbbb47dc6d4105f0f1ffb358fd2f",
"index": 6977,
"step-1": "<mask token>\n\n\nclass DensePoseConfig(ZambaBaseModel):\n <mask token>\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum('us')\n _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True\n )(validate_model_cache_dir)\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS['animals']\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS['chimps']\n else:\n raise Exception(f'invalid {self.output_type}')\n output_dir = Path(os.getcwd()\n ) if self.save_dir is None else self.save_dir\n dpm = DensePoseManager(model, model_cache_dir=self.cache_dir,\n download_region=self.weight_download_region)\n for fp in tqdm(self.filepaths.filepath, desc='Videos'):\n fp = Path(fp)\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=\n self.video_loader_config)\n output_path = output_dir / f'{fp.stem}_denspose_labels.json'\n dpm.serialize_video_output(labels, filename=output_path,\n write_embeddings=self.embeddings_in_json)\n if self.render_output:\n output_path = (output_dir /\n f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\")\n visualized_video = dpm.visualize_video(vid_arr, labels,\n output_path=output_path, fps=self.video_loader_config.fps)\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv'\n dpm.anatomize_video(visualized_video, labels, output_path=\n output_path, fps=self.video_loader_config.fps)\n _get_filepaths = root_validator(allow_reuse=True, pre=False,\n skip_on_failure=True)(get_filepaths)\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n if isinstance(values['filepaths'], pd.DataFrame):\n files_df = values['filepaths']\n else:\n files_df = pd.DataFrame(pd.read_csv(values['filepaths']))\n if 'filepath' not in files_df.columns:\n raise ValueError(\n f\"{values['filepaths']} must contain a `filepath` column.\")\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.'\n )\n files_df = files_df[['filepath']].drop_duplicates()\n values['filepaths'] = check_files_exist_and_load(df=files_df,\n data_dir=values['data_dir'], skip_load_validation=True)\n return values\n",
"step-2": "<mask token>\n\n\nclass DensePoseConfig(ZambaBaseModel):\n \"\"\"Configuration for running dense pose on videos.\n\n Args:\n video_loader_config (VideoLoaderConfig): Configuration for loading videos\n output_type (str): one of DensePoseOutputEnum (currently \"segmentation\" or \"chimp_anatomy\").\n render_output (bool): Whether to save a version of the video with the output overlaid on top.\n Defaults to False.\n embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the\n DensePose result. Setting to True can result in large json files. Defaults to False.\n data_dir (Path): Where to find the files listed in filepaths (or where to look if\n filepaths is not provided).\n filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.\n save_dir (Path, optional): Directory for where to save the output files;\n defaults to os.getcwd().\n cache_dir (Path, optional): Path for downloading and saving model weights. Defaults\n to env var `MODEL_CACHE_DIR` or the OS app cache dir.\n weight_download_region (RegionEnum, optional): region where to download weights; should\n be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.\n \"\"\"\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum('us')\n _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True\n )(validate_model_cache_dir)\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS['animals']\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS['chimps']\n else:\n raise Exception(f'invalid {self.output_type}')\n output_dir = Path(os.getcwd()\n ) if self.save_dir is None else self.save_dir\n dpm = DensePoseManager(model, model_cache_dir=self.cache_dir,\n download_region=self.weight_download_region)\n for fp in tqdm(self.filepaths.filepath, desc='Videos'):\n fp = Path(fp)\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=\n self.video_loader_config)\n output_path = output_dir / f'{fp.stem}_denspose_labels.json'\n dpm.serialize_video_output(labels, filename=output_path,\n write_embeddings=self.embeddings_in_json)\n if self.render_output:\n output_path = (output_dir /\n f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\")\n visualized_video = dpm.visualize_video(vid_arr, labels,\n output_path=output_path, fps=self.video_loader_config.fps)\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv'\n dpm.anatomize_video(visualized_video, labels, output_path=\n output_path, fps=self.video_loader_config.fps)\n _get_filepaths = root_validator(allow_reuse=True, pre=False,\n skip_on_failure=True)(get_filepaths)\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n if isinstance(values['filepaths'], pd.DataFrame):\n files_df = values['filepaths']\n else:\n files_df = pd.DataFrame(pd.read_csv(values['filepaths']))\n if 'filepath' not in files_df.columns:\n raise ValueError(\n f\"{values['filepaths']} must contain a `filepath` 
column.\")\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.'\n )\n files_df = files_df[['filepath']].drop_duplicates()\n values['filepaths'] = check_files_exist_and_load(df=files_df,\n data_dir=values['data_dir'], skip_load_validation=True)\n return values\n",
"step-3": "<mask token>\n\n\nclass DensePoseOutputEnum(Enum):\n segmentation = 'segmentation'\n chimp_anatomy = 'chimp_anatomy'\n\n\nclass DensePoseConfig(ZambaBaseModel):\n \"\"\"Configuration for running dense pose on videos.\n\n Args:\n video_loader_config (VideoLoaderConfig): Configuration for loading videos\n output_type (str): one of DensePoseOutputEnum (currently \"segmentation\" or \"chimp_anatomy\").\n render_output (bool): Whether to save a version of the video with the output overlaid on top.\n Defaults to False.\n embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the\n DensePose result. Setting to True can result in large json files. Defaults to False.\n data_dir (Path): Where to find the files listed in filepaths (or where to look if\n filepaths is not provided).\n filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.\n save_dir (Path, optional): Directory for where to save the output files;\n defaults to os.getcwd().\n cache_dir (Path, optional): Path for downloading and saving model weights. Defaults\n to env var `MODEL_CACHE_DIR` or the OS app cache dir.\n weight_download_region (RegionEnum, optional): region where to download weights; should\n be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.\n \"\"\"\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum('us')\n _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True\n )(validate_model_cache_dir)\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS['animals']\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS['chimps']\n else:\n raise Exception(f'invalid {self.output_type}')\n output_dir = Path(os.getcwd()\n ) if self.save_dir is None else self.save_dir\n dpm = DensePoseManager(model, model_cache_dir=self.cache_dir,\n download_region=self.weight_download_region)\n for fp in tqdm(self.filepaths.filepath, desc='Videos'):\n fp = Path(fp)\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=\n self.video_loader_config)\n output_path = output_dir / f'{fp.stem}_denspose_labels.json'\n dpm.serialize_video_output(labels, filename=output_path,\n write_embeddings=self.embeddings_in_json)\n if self.render_output:\n output_path = (output_dir /\n f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\")\n visualized_video = dpm.visualize_video(vid_arr, labels,\n output_path=output_path, fps=self.video_loader_config.fps)\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv'\n dpm.anatomize_video(visualized_video, labels, output_path=\n output_path, fps=self.video_loader_config.fps)\n _get_filepaths = root_validator(allow_reuse=True, pre=False,\n skip_on_failure=True)(get_filepaths)\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n if isinstance(values['filepaths'], pd.DataFrame):\n files_df = values['filepaths']\n else:\n files_df = pd.DataFrame(pd.read_csv(values['filepaths']))\n if 
'filepath' not in files_df.columns:\n raise ValueError(\n f\"{values['filepaths']} must contain a `filepath` column.\")\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.'\n )\n files_df = files_df[['filepath']].drop_duplicates()\n values['filepaths'] = check_files_exist_and_load(df=files_df,\n data_dir=values['data_dir'], skip_load_validation=True)\n return values\n",
"step-4": "from enum import Enum\nimport os\nfrom pathlib import Path\nfrom typing import Optional\nfrom loguru import logger\nimport pandas as pd\nfrom pydantic.class_validators import root_validator, validator\nfrom tqdm import tqdm\nfrom zamba.data.video import VideoLoaderConfig\nfrom zamba.models.config import ZambaBaseModel, check_files_exist_and_load, get_filepaths, validate_model_cache_dir\nfrom zamba.models.densepose.densepose_manager import MODELS, DensePoseManager\nfrom zamba.models.utils import RegionEnum\n\n\nclass DensePoseOutputEnum(Enum):\n segmentation = 'segmentation'\n chimp_anatomy = 'chimp_anatomy'\n\n\nclass DensePoseConfig(ZambaBaseModel):\n \"\"\"Configuration for running dense pose on videos.\n\n Args:\n video_loader_config (VideoLoaderConfig): Configuration for loading videos\n output_type (str): one of DensePoseOutputEnum (currently \"segmentation\" or \"chimp_anatomy\").\n render_output (bool): Whether to save a version of the video with the output overlaid on top.\n Defaults to False.\n embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the\n DensePose result. Setting to True can result in large json files. Defaults to False.\n data_dir (Path): Where to find the files listed in filepaths (or where to look if\n filepaths is not provided).\n filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.\n save_dir (Path, optional): Directory for where to save the output files;\n defaults to os.getcwd().\n cache_dir (Path, optional): Path for downloading and saving model weights. Defaults\n to env var `MODEL_CACHE_DIR` or the OS app cache dir.\n weight_download_region (RegionEnum, optional): region where to download weights; should\n be one of RegionEnum (currently 'us', 'asia', and 'eu'). 
Defaults to 'us'.\n \"\"\"\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum('us')\n _validate_cache_dir = validator('cache_dir', allow_reuse=True, always=True\n )(validate_model_cache_dir)\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS['animals']\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS['chimps']\n else:\n raise Exception(f'invalid {self.output_type}')\n output_dir = Path(os.getcwd()\n ) if self.save_dir is None else self.save_dir\n dpm = DensePoseManager(model, model_cache_dir=self.cache_dir,\n download_region=self.weight_download_region)\n for fp in tqdm(self.filepaths.filepath, desc='Videos'):\n fp = Path(fp)\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=\n self.video_loader_config)\n output_path = output_dir / f'{fp.stem}_denspose_labels.json'\n dpm.serialize_video_output(labels, filename=output_path,\n write_embeddings=self.embeddings_in_json)\n if self.render_output:\n output_path = (output_dir /\n f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\")\n visualized_video = dpm.visualize_video(vid_arr, labels,\n output_path=output_path, fps=self.video_loader_config.fps)\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / f'{fp.stem}_denspose_anatomy.csv'\n dpm.anatomize_video(visualized_video, labels, output_path=\n output_path, fps=self.video_loader_config.fps)\n _get_filepaths = root_validator(allow_reuse=True, pre=False,\n skip_on_failure=True)(get_filepaths)\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n if isinstance(values['filepaths'], pd.DataFrame):\n files_df = values['filepaths']\n else:\n files_df = pd.DataFrame(pd.read_csv(values['filepaths']))\n if 'filepath' not in files_df.columns:\n raise ValueError(\n f\"{values['filepaths']} must contain a `filepath` column.\")\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f'Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.'\n )\n files_df = files_df[['filepath']].drop_duplicates()\n values['filepaths'] = check_files_exist_and_load(df=files_df,\n data_dir=values['data_dir'], skip_load_validation=True)\n return values\n",
"step-5": "from enum import Enum\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom loguru import logger\nimport pandas as pd\nfrom pydantic.class_validators import root_validator, validator\nfrom tqdm import tqdm\n\nfrom zamba.data.video import VideoLoaderConfig\nfrom zamba.models.config import (\n ZambaBaseModel,\n check_files_exist_and_load,\n get_filepaths,\n validate_model_cache_dir,\n)\nfrom zamba.models.densepose.densepose_manager import MODELS, DensePoseManager\nfrom zamba.models.utils import RegionEnum\n\n\nclass DensePoseOutputEnum(Enum):\n segmentation = \"segmentation\"\n chimp_anatomy = \"chimp_anatomy\"\n\n\nclass DensePoseConfig(ZambaBaseModel):\n \"\"\"Configuration for running dense pose on videos.\n\n Args:\n video_loader_config (VideoLoaderConfig): Configuration for loading videos\n output_type (str): one of DensePoseOutputEnum (currently \"segmentation\" or \"chimp_anatomy\").\n render_output (bool): Whether to save a version of the video with the output overlaid on top.\n Defaults to False.\n embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the\n DensePose result. Setting to True can result in large json files. Defaults to False.\n data_dir (Path): Where to find the files listed in filepaths (or where to look if\n filepaths is not provided).\n filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.\n save_dir (Path, optional): Directory for where to save the output files;\n defaults to os.getcwd().\n cache_dir (Path, optional): Path for downloading and saving model weights. Defaults\n to env var `MODEL_CACHE_DIR` or the OS app cache dir.\n weight_download_region (RegionEnum, optional): region where to download weights; should\n be one of RegionEnum (currently 'us', 'asia', and 'eu'). 
Defaults to 'us'.\n \"\"\"\n\n video_loader_config: VideoLoaderConfig\n output_type: DensePoseOutputEnum\n render_output: bool = False\n embeddings_in_json: bool = False\n data_dir: Path\n filepaths: Optional[Path] = None\n save_dir: Optional[Path] = None\n cache_dir: Optional[Path] = None\n weight_download_region: RegionEnum = RegionEnum(\"us\")\n\n _validate_cache_dir = validator(\"cache_dir\", allow_reuse=True, always=True)(\n validate_model_cache_dir\n )\n\n def run_model(self):\n \"\"\"Use this configuration to execute DensePose via the DensePoseManager\"\"\"\n if not isinstance(self.output_type, DensePoseOutputEnum):\n self.output_type = DensePoseOutputEnum(self.output_type)\n\n if self.output_type == DensePoseOutputEnum.segmentation.value:\n model = MODELS[\"animals\"]\n elif self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n model = MODELS[\"chimps\"]\n else:\n raise Exception(f\"invalid {self.output_type}\")\n\n output_dir = Path(os.getcwd()) if self.save_dir is None else self.save_dir\n\n dpm = DensePoseManager(\n model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region\n )\n\n for fp in tqdm(self.filepaths.filepath, desc=\"Videos\"):\n fp = Path(fp)\n\n vid_arr, labels = dpm.predict_video(fp, video_loader_config=self.video_loader_config)\n\n # serialize the labels generated by densepose to json\n output_path = output_dir / f\"{fp.stem}_denspose_labels.json\"\n dpm.serialize_video_output(\n labels, filename=output_path, write_embeddings=self.embeddings_in_json\n )\n\n # re-render the video with the densepose labels visualized on top of the video\n if self.render_output:\n output_path = output_dir / f\"{fp.stem}_denspose_video{''.join(fp.suffixes)}\"\n visualized_video = dpm.visualize_video(\n vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps\n )\n\n # write out the anatomy present in each frame to a csv for later analysis\n if self.output_type == DensePoseOutputEnum.chimp_anatomy.value:\n output_path = output_dir / f\"{fp.stem}_denspose_anatomy.csv\"\n dpm.anatomize_video(\n visualized_video,\n labels,\n output_path=output_path,\n fps=self.video_loader_config.fps,\n )\n\n _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(\n get_filepaths\n )\n\n @root_validator(skip_on_failure=True)\n def validate_files(cls, values):\n # if globbing from data directory, already have valid dataframe\n if isinstance(values[\"filepaths\"], pd.DataFrame):\n files_df = values[\"filepaths\"]\n else:\n # make into dataframe even if only one column for clearer indexing\n files_df = pd.DataFrame(pd.read_csv(values[\"filepaths\"]))\n\n if \"filepath\" not in files_df.columns:\n raise ValueError(f\"{values['filepaths']} must contain a `filepath` column.\")\n\n # can only contain one row per filepath\n duplicated = files_df.filepath.duplicated()\n if duplicated.sum() > 0:\n logger.warning(\n f\"Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video.\"\n )\n files_df = files_df[[\"filepath\"]].drop_duplicates()\n\n values[\"filepaths\"] = check_files_exist_and_load(\n df=files_df,\n data_dir=values[\"data_dir\"],\n skip_load_validation=True,\n )\n return values\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
import requests
import datetime
from yahoo_finance import Share
def getYahooStock(ticker, date1, date2):
companyData = Share(ticker)
dataList = companyData.get_historical(date1, date2)
endData = dataList[0];
startData = dataList[len(dataList) - 1];
    print(ticker, float(startData['Open']), float(endData['Open']))
return ticker, float(startData['Open']), float(endData['Open'])
def stockDrop(ticker, date1):
currentDate = datetime.datetime.now()
formattedDate = (str(currentDate.year) + '-' + str(currentDate.month) + '-' + str(currentDate.day))
companyData = Share(ticker)
dataList = companyData.get_historical(date1, formattedDate);
originalStock = float(dataList[len(dataList) - 1]['Open']);
nextLower = 0
days = -1
for index, i in enumerate(reversed(dataList)):
nextLower = i['Open']
if float(nextLower) < float(originalStock):
days = len(dataList) - index
break
    print(days, originalStock, nextLower)
return days, originalStock, nextLower
#def stockRange(ticker, date, range):
# dateRange = datetime.datetime()
def buyStock(ticker, buyDate, sellDate, amount):
data = getYahooStock(ticker, buyDate, sellDate)
    print((amount * data[2]) / data[1])
return (amount * data[2])/data[1]
start_date = datetime.datetime(2017, 4, 7, 0)
end_date = datetime.datetime(2017, 4, 14, 0)
d = start_date
delta = datetime.timedelta(hours=1)
print(delta)
companyData = Share('UAL')
dataList = companyData.get_historical('2016-03-29', '2017-03-29')  # example date range
while d <= end_date:
    print(getYahooStock)
    print(d.strftime("%Y-%m-%d %H"))
    d += delta
stockDrop("BP", '2016-03-29')
getYahooStock("WFC", '2016-03-29', '2017-03-29')
buyStock("WFC", '2016-03-29', '2017-03-29', 1)
|
normal
|
{
"blob_id": "07854dc9e0a863834b8e671d29d5f407cdd1c13e",
"index": 9599,
"step-1": "import requests\nimport datetime\nfrom yahoo_finance import Share\n\ndef getYahooStock(ticker, date1, date2):\n companyData = Share(ticker)\n dataList = companyData.get_historical(date1, date2)\n endData = dataList[0];\n startData = dataList[len(dataList) - 1];\n print ticker, float(startData['Open']), float(endData['Open'])\n return ticker, float(startData['Open']), float(endData['Open'])\n\ndef stockDrop(ticker, date1):\n currentDate = datetime.datetime.now()\n formattedDate = (str(currentDate.year) + '-' + str(currentDate.month) + '-' + str(currentDate.day))\n companyData = Share(ticker)\n dataList = companyData.get_historical(date1, formattedDate);\n originalStock = float(dataList[len(dataList) - 1]['Open']);\n nextLower = 0\n days = -1\n for index, i in enumerate(reversed(dataList)):\n nextLower = i['Open']\n if float(nextLower) < float(originalStock):\n days = len(dataList) - index\n break\n print days, originalStock, nextLower\n return days, originalStock, nextLower\n\n#def stockRange(ticker, date, range):\n # dateRange = datetime.datetime()\n\ndef buyStock(ticker, buyDate, sellDate, amount):\n data = getYahooStock(ticker, buyDate, sellDate)\n print (amount * data[2])/data[1]\n return (amount * data[2])/data[1]\n\nstart_date = datetime.datetime(2017, 4, 7, 0)\nend_date = datetime.datetime(2017, 4, 14, 0)\n\nd = start_date\ndelta = datetime.timedelta(hours=1)\nprint delta\ncompanyData = Share('UAL')\ndataList = companyData.get_historical(date1, date2)\nwhile d <= end_date:\n print getYahooStock\n print d.strftime(\"%Y-%m-%d %H\")\n d += delta\n\nstockDrop(\"BP\", '2016-03-29')\ngetYahooStock(\"WFC\", '2016-03-29', '2017-03-29')\nbuyStock(\"WFC\", '2016-03-29', '2017-03-29', 1)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import datetime
now = datetime.datetime.now()
# Printing value of now.
print ("Time now : ", now)
|
normal
|
{
"blob_id": "0110d26e17a5402c22f519d0aeb2aacca3279d00",
"index": 7792,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Time now : ', now)\n",
"step-3": "<mask token>\nnow = datetime.datetime.now()\nprint('Time now : ', now)\n",
"step-4": "import datetime\nnow = datetime.datetime.now()\nprint('Time now : ', now)\n",
"step-5": "import datetime \r\n\r\nnow = datetime.datetime.now() \r\n \r\n# Printing value of now. \r\nprint (\"Time now : \", now) \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.apps import AppConfig
class QuadraticEquationsSolverConfig(AppConfig):
name = 'quadratic_equations_solver'
|
normal
|
{
"blob_id": "730fc527f3d2805559e8917e846b0b13f4a9f6ee",
"index": 2316,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass QuadraticEquationsSolverConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass QuadraticEquationsSolverConfig(AppConfig):\n name = 'quadratic_equations_solver'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass QuadraticEquationsSolverConfig(AppConfig):\n name = 'quadratic_equations_solver'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#External modules
import os
import re
import logging
import csv
import xml.etree.ElementTree as ET
from chardet import detect
#In-house modules
from Abes_Apis_Interface.AbesXml import AbesXml
from Alma_Apis_Interface import Alma_Apis_Records
from Alma_Apis_Interface import Alma_Apis
from logs import logs
SERVICE = "Recotation_en_masse"
LOGS_LEVEL = 'INFO'
LOGS_DIR = os.getenv('LOGS_PATH')
LIBRARY_CODE = 1601900000
REGION = 'EU'
INSTITUTION = 'ub'
API_KEY = os.getenv('PROD_UB_BIB_API')
FILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'
IN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)
OUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)
CALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(FILE_NAME)
# get file encoding type
def get_encoding_type(file):
with open(file, 'rb') as f:
rawdata = f.read()
return detect(rawdata)['encoding']
def item_change_location(item,location,call):
"""Change location and remove holdinds infos
Arguments:
item {str} -- xml response of get item ws
location {str} -- new location_code
call {str} -- new call
Returns:
[str] -- mms_id, holding_id, pid
"""
mms_id, holding_id, pid = item.find(".//mms_id").text, item.find(".//holding_id").text, item.find(".//pid").text
item.find(".//item_data/location").text = location
    # Clear the call number stored at the item level
    item.find(".//item_data/alternative_call_number").text = ''
    item.find(".//item_data/alternative_call_number_type").text = ''
    # Do not send the holding information back in the payload
holding_data = item.find(".//holding_data")
item.remove(holding_data)
    # If another item attached to the same bib record has already been processed
    if mms_id in processed_record_dict:
        # If the location is the same as the one used for the item already processed
        if location_code in processed_record_dict[mms_id]:
            # If the call numbers differ, keep this call number on the item itself
if processed_record_dict[mms_id][location_code] != call:
multi_call_report.write("{}\n".format(barcode))
item.find(".//item_data/alternative_call_number").text = call
return mms_id, holding_id, pid
def update_holding_data(holding,new_call):
"""Change call (852$$h) and reset call type (852 fiest indicator)
Arguments:
holding {str} -- response of get holding ws
new_call {str} -- new value for call subfield
Returns:
str -- changed data
"""
holding_data = ET.fromstring(holding)
location_field =holding_data.find(".//datafield[@tag='852']")
location_field.set('ind1', ' ')
call_subfield = holding_data.find(".//datafield[@tag='852']/subfield[@code='h']")
call_subfield.text = new_call
return ET.tostring(holding_data)
#Init logger
logs.init_logs(LOGS_DIR,SERVICE,LOGS_LEVEL)
log_module = logging.getLogger(SERVICE)
conf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)
alma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION, service=SERVICE)
#We get all the locations for the library in a dictionnary
locations_dict = conf.get_locations(LIBRARY_CODE)
log_module.info("Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement".format(LIBRARY_CODE))
report = open(OUT_FILE, "w", encoding='utf-8')
report.write("Code-barres\tStatut\tMessage\n")
processed_record_dict = {}
toprocess_holding_dict = {}
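# processed_record_dict: mms_id -> {location_code: call} for items already moved (used to detect duplicate call numbers)
# toprocess_holding_dict: new holding id -> call number and barcode, consumed by the holding-update pass below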
multi_call_report = open(CALL_ERROR_FILE, "w", encoding='utf-8')
multi_call_report.write("code-barres\n")
###Update item sequence
# ######################
from_codec = get_encoding_type(IN_FILE)
with open(IN_FILE, 'r', encoding=from_codec, newline='') as f:
reader = csv.reader(f, delimiter=';')
headers = next(reader)
# We read the file
for row in reader:
if len(row) < 2:
continue
barcode = row[0]
# Test if new call is defined
if row[1] is None or row[1] == '':
log_module.error("{} :: Echec :: pas de cote fournie".format(barcode))
report.write("{}\tErreur Fichier\tPas de cote fournie\n".format(barcode))
continue
call = row[1].upper()
        # Test if the new location is defined
if row[3] is None or row[3] == '':
log_module.error("{} :: Echec :: pas de localisation fournie".format(barcode))
report.write("{}\tErreur Fichier\tPas de localisation fournie\n".format(barcode))
continue
# log_module.info("{} :: Main :: Début du traitement".format(barcode))
        # Translate the location label into a location code
if row[3] not in locations_dict:
log_module.error("{} :: Echec :: La localisation {} est inconnue dans Alma".format(barcode,row[3]))
report.write("{}\tErreur Fichier\tLa localisation '{}' est inconnue dans Alma\n".format(barcode,row[3]))
continue
location_code = locations_dict[row[3]]
log_module.debug("{} :: Succes :: A affecter dans la localisation {}".format(barcode,location_code))
        # Get the item data by barcode
status, response = alma_api.get_item_with_barcode(barcode)
if status == 'Error':
log_module.error("{} :: Echec :: {}".format(barcode,response))
report.write("{}\tErreur Retrouve Exemplaire\t{}\n".format(barcode,response))
continue
        # Change the location and remove holdings info
item = ET.fromstring(response)
mms_id, old_holding_id,item_id = item_change_location(item,location_code, call)
# log_module.debug("{} :: {} - {} - {}".format(barcode,mms_id,old_holding_id,item_id))
        # Update item in Alma
set_status, set_response = alma_api.set_item(mms_id, old_holding_id,item_id,ET.tostring(item))
log_module.debug(set_response)
if set_status == 'Error':
log_module.error("{} :: Echec :: {}".format(barcode,set_response))
report.write("{}\tErreur Mise à jour Exemplaire\t{}\n".format(barcode,set_response))
continue
changed_item = ET.fromstring(set_response)
new_holding_id = changed_item.find(".//holding_id").text
processed_record_dict[mms_id] = {
location_code: call
}
if new_holding_id not in toprocess_holding_dict:
toprocess_holding_dict[new_holding_id] = {
'call' : call,
'barcode': barcode
}
log_module.info("{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}".format(barcode,new_holding_id))
log_module.info("FIN DU DEPLACEMENT DES EXEMPLAIRES")
###Update new holding sequence
# ############################
log_module.info("DEBUT DE LA MODIFICATION DES HOLDINGS")
for new_holding_id in toprocess_holding_dict.keys():
call = toprocess_holding_dict[new_holding_id]['call']
barcode = toprocess_holding_dict[new_holding_id]['barcode']
# Get new holding
get_holding_status, get_holding_response = alma_api.get_holding(mms_id, new_holding_id)
if get_holding_status == 'Error':
log_module.error("{} :: Echec :: {}".format(new_holding_id,get_holding_response))
report.write("{}\tErreur Retrouve Holding\t{}\n".format(barcode,get_holding_response))
continue
changed_holding = update_holding_data(get_holding_response,call)
#Update new Holding in Alma
set_holding_status, set_holding_response = alma_api.set_holding(mms_id, new_holding_id,changed_holding)
if set_holding_status == 'Error':
log_module.error("{} :: Echec :: {}".format(new_holding_id,set_holding_response))
report.write("{}\tErreur Ecriture Holding\t{}\n".format(barcode,set_holding_response))
continue
log_module.debug(set_holding_response)
log_module.info("{} :: Succes :: La holding a été mise à jour".format(new_holding_id))
report.close
multi_call_report.close
log_module.info("FIN DU TRAITEMENT")
|
normal
|
{
"blob_id": "1f94ef0aae1128089b34fc952766cc3927677cdf",
"index": 5698,
"step-1": "<mask token>\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\n<mask token>\nlog_module.info(\n 'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\n<mask token>\nreport.write('Code-barres\\tStatut\\tMessage\\n')\n<mask token>\nmulti_call_report.write('code-barres\\n')\n<mask token>\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n 
log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n",
"step-3": "<mask token>\nSERVICE = 'Recotation_en_masse'\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\nLIBRARY_CODE = 1601900000\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(\n FILE_NAME)\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION,\n service=SERVICE)\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\n 'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\nreport = open(OUT_FILE, 'w', encoding='utf-8')\nreport.write('Code-barres\\tStatut\\tMessage\\n')\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = open(CALL_ERROR_FILE, 'w', encoding='utf-8')\nmulti_call_report.write('code-barres\\n')\nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in 
locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n",
"step-4": "import os\nimport re\nimport logging\nimport csv\nimport xml.etree.ElementTree as ET\nfrom chardet import detect\nfrom Abes_Apis_Interface.AbesXml import AbesXml\nfrom Alma_Apis_Interface import Alma_Apis_Records\nfrom Alma_Apis_Interface import Alma_Apis\nfrom logs import logs\nSERVICE = 'Recotation_en_masse'\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\nLIBRARY_CODE = 1601900000\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(\n FILE_NAME)\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION,\n service=SERVICE)\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\n 'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\nreport = open(OUT_FILE, 'w', encoding='utf-8')\nreport.write('Code-barres\\tStatut\\tMessage\\n')\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = open(CALL_ERROR_FILE, 'w', encoding='utf-8')\nmulti_call_report.write('code-barres\\n')\nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n 
continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n",
"step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#Modules externes\nimport os\nimport re\nimport logging\nimport csv\nimport xml.etree.ElementTree as ET\nfrom chardet import detect\n\n#Modules maison\nfrom Abes_Apis_Interface.AbesXml import AbesXml\nfrom Alma_Apis_Interface import Alma_Apis_Records\nfrom Alma_Apis_Interface import Alma_Apis\nfrom logs import logs\n\nSERVICE = \"Recotation_en_masse\"\n\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\n\nLIBRARY_CODE = 1601900000\n\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\n\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(FILE_NAME)\n\n# get file encoding type\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\ndef item_change_location(item,location,call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find(\".//mms_id\").text, item.find(\".//holding_id\").text, item.find(\".//pid\").text\n item.find(\".//item_data/location\").text = location\n # On nettoie la cote présente au niveau de l'exemplaire\n item.find(\".//item_data/alternative_call_number\").text = ''\n item.find(\".//item_data/alternative_call_number_type\").text = ''\n # On ne renvoie pas les infos de la holdings\n holding_data = item.find(\".//holding_data\")\n item.remove(holding_data)\n # Si un autre exemplaire lié à la même notice a déjà été traité\n if mms_id in processed_record_dict:\n # Si la localisation était la même que celle de l'exemplaire déjà traité\n if location_code in processed_record_dict[mms_id]:\n # Si les cotes sont différentes alors on créé la cote sous l'exemplaire\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write(\"{}\\n\".format(barcode))\n item.find(\".//item_data/alternative_call_number\").text = call\n return mms_id, holding_id, pid\n\ndef update_holding_data(holding,new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field =holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n#Init logger\nlogs.init_logs(LOGS_DIR,SERVICE,LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\n\n\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION, service=SERVICE)\n\n#We get all the locations for the library in a dictionnary\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\"Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement\".format(LIBRARY_CODE))\n\nreport = open(OUT_FILE, \"w\", encoding='utf-8')\nreport.write(\"Code-barres\\tStatut\\tMessage\\n\")\n\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = 
open(CALL_ERROR_FILE, \"w\", encoding='utf-8')\nmulti_call_report.write(\"code-barres\\n\")\n\n###Update item sequence\n# ###################### \nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n # We read the file\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n # Test if new call is defined\n if row[1] is None or row[1] == '':\n log_module.error(\"{} :: Echec :: pas de cote fournie\".format(barcode))\n report.write(\"{}\\tErreur Fichier\\tPas de cote fournie\\n\".format(barcode))\n continue\n call = row[1].upper()\n # Test if new localisation is defined\n if row[3] is None or row[3] == '':\n log_module.error(\"{} :: Echec :: pas de localisation fournie\".format(barcode))\n report.write(\"{}\\tErreur Fichier\\tPas de localisation fournie\\n\".format(barcode))\n continue\n # log_module.info(\"{} :: Main :: Début du traitement\".format(barcode))\n # Transform location label in location code\n if row[3] not in locations_dict:\n log_module.error(\"{} :: Echec :: La localisation {} est inconnue dans Alma\".format(barcode,row[3]))\n report.write(\"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\".format(barcode,row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug(\"{} :: Succes :: A affecter dans la localisation {}\".format(barcode,location_code))\n \n\n # Get datas item with barcode\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(barcode,response))\n report.write(\"{}\\tErreur Retrouve Exemplaire\\t{}\\n\".format(barcode,response))\n continue\n # Change location and remove holdinds infos\n item = ET.fromstring(response)\n mms_id, old_holding_id,item_id = item_change_location(item,location_code, call)\n # log_module.debug(\"{} :: {} - {} - {}\".format(barcode,mms_id,old_holding_id,item_id))\n # Upadte item in Alma\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,item_id,ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(barcode,set_response))\n report.write(\"{}\\tErreur Mise à jour Exemplaire\\t{}\\n\".format(barcode,set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find(\".//holding_id\").text\n processed_record_dict[mms_id] = {\n location_code: call\n }\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {\n 'call' : call,\n 'barcode': barcode\n }\n log_module.info(\"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\".format(barcode,new_holding_id))\nlog_module.info(\"FIN DU DEPLACEMENT DES EXEMPLAIRES\")\n\n###Update new holding sequence\n# ############################\nlog_module.info(\"DEBUT DE LA MODIFICATION DES HOLDINGS\")\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n # Get new holding\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id, new_holding_id)\n if get_holding_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(new_holding_id,get_holding_response))\n report.write(\"{}\\tErreur Retrouve Holding\\t{}\\n\".format(barcode,get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response,call)\n #Update new 
Holding in Alma\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id, new_holding_id,changed_holding)\n if set_holding_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(new_holding_id,set_holding_response))\n report.write(\"{}\\tErreur Ecriture Holding\\t{}\\n\".format(barcode,set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info(\"{} :: Succes :: La holding a été mise à jour\".format(new_holding_id))\n\nreport.close\n\nmulti_call_report.close\nlog_module.info(\"FIN DU TRAITEMENT\")\n\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#game that has a timer and you need to stop the timer
#with 0 at the end.
import simplegui
#necessary global variables
#time for the timer
time = 0
#the display for the timer(string form)
watch = ''
#tries and correct presses
tries = 0
correct = 0
#changes time to watch(number to string of form A:BC.D)
def format():
global time, watch
t = time
deciseconds = t % 10
remains = t - deciseconds
    seconds = (remains % 600) // 10
    minutes = remains // 600
if seconds<10:
zero = '0'
else:
zero = ''
watch = str(minutes) + ":" + zero + str(seconds) + "." + str(deciseconds)
#increase the time
def increment():
global time
time = time + 1
#start the timer
def start():
timer.start()
#stop the timer + calculate the tries and correct stops
def stop():
global correct, tries
timer.stop()
if time != 0:
tries = tries + 1
if time % 10 == 0:
correct = correct + 1
#reset all values
def reset():
global time, correct, tries
time, correct, tries = 0,0,0
stop()
#necessary drawings
def draw(canvas):
format()
canvas.draw_text(str(correct), (253, 30), 30, 'white')
canvas.draw_text('/', (270, 30), 30, 'white')
canvas.draw_text(str(tries), (280, 30), 30, 'white')
canvas.draw_text(watch, (70, 130), 60,'white')
#frame and event handlers
frame = simplegui.create_frame("StOpWaTcH: gAmE", 320, 200)
button1 = frame.add_button("Start timer", start, 100)
button2 = frame.add_button("Stop timer", stop, 100)
button3 = frame.add_button("Resrt timer", reset, 100)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(100, increment)
#start of the game
frame.start()
|
normal
|
{
"blob_id": "b3c22b4a453aa55da980b090df2749ff9f1066e6",
"index": 5932,
"step-1": "<mask token>\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef format():\n global time, watch\n t = time\n deciseconds = t % 10\n remains = t - deciseconds\n seconds = remains % 600 / 10\n minutes = remains / 600\n if seconds < 10:\n zero = '0'\n else:\n zero = ''\n watch = str(minutes) + ':' + zero + str(seconds) + '.' + str(deciseconds)\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\n<mask token>\n",
"step-4": "<mask token>\ntime = 0\nwatch = ''\ntries = 0\ncorrect = 0\n\n\ndef format():\n global time, watch\n t = time\n deciseconds = t % 10\n remains = t - deciseconds\n seconds = remains % 600 / 10\n minutes = remains / 600\n if seconds < 10:\n zero = '0'\n else:\n zero = ''\n watch = str(minutes) + ':' + zero + str(seconds) + '.' + str(deciseconds)\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\nframe = simplegui.create_frame('StOpWaTcH: gAmE', 320, 200)\nbutton1 = frame.add_button('Start timer', start, 100)\nbutton2 = frame.add_button('Stop timer', stop, 100)\nbutton3 = frame.add_button('Resrt timer', reset, 100)\nframe.set_draw_handler(draw)\ntimer = simplegui.create_timer(100, increment)\nframe.start()\n",
"step-5": "#game that has a timer and you need to stop the timer\r\n#with 0 at the end.\r\n\r\nimport simplegui\r\n\r\n#necessary global variables\r\n\r\n#time for the timer\r\ntime = 0\r\n#the display for the timer(string form)\r\nwatch = ''\r\n#tries and correct presses\r\ntries = 0\r\ncorrect = 0\r\n\r\n\r\n#changes time to watch(number to string of form A:BC.D)\r\ndef format():\r\n global time, watch\r\n t = time\r\n deciseconds = t % 10\r\n remains = t - deciseconds\r\n seconds = (remains % 600) / 10\r\n minutes = remains / 600\r\n if seconds<10:\r\n zero = '0'\r\n else:\r\n zero = '' \r\n watch = str(minutes) + \":\" + zero + str(seconds) + \".\" + str(deciseconds)\r\n \r\n\r\n#increase the time \r\ndef increment():\r\n global time\r\n time = time + 1 \r\n \r\n \r\n#start the timer \r\ndef start():\r\n timer.start()\r\n \r\n\r\n#stop the timer + claculate the tries and correct stops\r\ndef stop():\r\n global correct, tries\r\n timer.stop()\r\n if time != 0:\r\n tries = tries + 1\r\n if time % 10 == 0:\r\n correct = correct + 1\r\n\r\n\r\n#reset all values \r\ndef reset():\r\n global time, correct, tries\r\n time, correct, tries = 0,0,0\r\n stop() \r\n\r\n\r\n#necessary drawings \r\ndef draw(canvas):\r\n format()\r\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\r\n canvas.draw_text('/', (270, 30), 30, 'white') \r\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\r\n canvas.draw_text(watch, (70, 130), 60,'white')\r\n \r\n\r\n#frame and event handlers\r\nframe = simplegui.create_frame(\"StOpWaTcH: gAmE\", 320, 200)\r\nbutton1 = frame.add_button(\"Start timer\", start, 100)\r\nbutton2 = frame.add_button(\"Stop timer\", stop, 100)\r\nbutton3 = frame.add_button(\"Resrt timer\", reset, 100)\r\nframe.set_draw_handler(draw)\r\ntimer = simplegui.create_timer(100, increment)\r\n\r\n\r\n#start of the game\r\nframe.start()\r\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
import tensorflow as tf
from keras import layers, Model, Input
from keras.utils import Progbar, to_categorical
from keras.datasets.mnist import load_data
import numpy as np
import matplotlib.pyplot as plt
import config
import datetime
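# Conditional DCGAN on MNIST: both the generator and the discriminator
# take a one-hot encoded digit label alongside their main input.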
img_height, img_width, _ = config.IMAGE_SHAPE
(X, Y), (_, _) = load_data()
X = X.reshape((-1, img_height, img_width, 1))
X = X.astype("float32")
Y = to_categorical(Y, num_classes=10, dtype="float32")
def preprocess(img, lbl):
img = (img - 127.5) / 127.5
img = tf.convert_to_tensor(img, dtype=tf.float32)
return img, lbl
class Generator(Model):
def __init__(self, name):
super(Generator, self).__init__(name=name)
self.dense = layers.Dense(7*7*128)
self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding="same")
self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding="same")
self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2, padding="same")
self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation="tanh", padding="same")
self.relu = layers.ReLU()
self.bn1 = layers.BatchNormalization()
self.bn2 = layers.BatchNormalization()
self.bn3 = layers.BatchNormalization()
self.bn4 = layers.BatchNormalization()
def call(self, inputs, training=None, mask=None):
noise, label = inputs
x = layers.Concatenate()([noise, label])
x = self.dense(x)
x = layers.Reshape(target_shape=(7, 7, 128))(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv1(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv4(x)
return x
def get_config(self):
return {'name': self.name}
class Discriminator(Model):
def __init__(self, name, img_shape=(28, 28, 1)):
super(Discriminator, self).__init__(name=name)
self.img_shape = img_shape
self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)
self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)
self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding="same")
self.conv4 = layers.Conv2D(256, kernel_size=5, padding="same")
self.leaky_relu = layers.LeakyReLU(alpha=0.2)
self.flatten = layers.Flatten()
self.dense_final = layers.Dense(1, activation='sigmoid')
self.dense = layers.Dense(7*7*16)
def call(self, inputs, training=None, mask=None):
image, label = inputs
lb = self.dense(label)
lb = layers.Reshape(target_shape=(28, 28, 1))(lb)
x = layers.Concatenate()([image, lb])
x = self.leaky_relu(x)
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.conv4(x)
x = self.flatten(x)
x = self.dense_final(x)
return x
def get_config(self):
return {"img_shape": self.img_shape, "name": self.name}
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
gen = Generator(name="generator")
disc = Discriminator(name="discriminator", img_shape=config.IMAGE_SHAPE)
gen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)
disc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
train_dataset = dataset.take(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)
val_dataset = dataset.skip(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)
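# 80/20 train/validation split; preprocess() rescales pixels from [0, 255] to [-1, 1]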
checkpoint = tf.train.Checkpoint(generator=gen,
gen_optimizer=gen_optimizer,
discriminator=disc,
disc_optimizer=disc_optimizer)
ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=config.CKPT_DIR, max_to_keep=3)
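# only the three most recent checkpoints are kept on disk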
# creates a summary writer, writes a summary in a file to access on tensorboard later
summary_writer = tf.summary.create_file_writer(
logdir=config.LOG_DIR + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
'''LOSSES'''
def disc_loss(real_logits, fake_logits):
real_loss = tf.losses.BinaryCrossentropy()(tf.ones_like(real_logits), real_logits)
fake_loss = tf.losses.BinaryCrossentropy()(tf.zeros_like(fake_logits), fake_logits)
loss = 0.5*(real_loss + fake_loss)
return loss
def gen_loss(fake_logits):
loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits), fake_logits)
return loss
# give signature to avoid retracing
signature = [
tf.TensorSpec(shape=(None, 28, 28, 1), dtype=tf.float32),
tf.TensorSpec(shape=(None, 10), dtype=tf.float32),
tf.TensorSpec(shape=(), dtype=tf.int64)
]
@tf.function(input_signature=signature)
def train_step(image_batch, label_batch, epoch):
noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))
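    # a persistent tape lets us take gradients twice (generator and discriminator) from one recorded forward pass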
with tf.GradientTape(persistent=True) as tape:
fake_img_batch = gen([noise, label_batch], training=True)
fake_logits = disc([fake_img_batch, label_batch], training=True)
real_logits = disc([image_batch, label_batch], training=True)
d_loss = disc_loss(real_logits, fake_logits)
g_loss = gen_loss(fake_logits)
gen_grads = tape.gradient(g_loss, gen.trainable_variables)
disc_grads = tape.gradient(d_loss, disc.trainable_variables)
gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))
disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))
# writes a tensorboard summary (creates graph if scalar)
with summary_writer.as_default():
tf.summary.scalar("generator_loss", g_loss, step=epoch)
tf.summary.scalar("discriminator_loss", d_loss, step=epoch)
g_loss = tf.metrics.Mean()
d_loss = tf.metrics.Mean()
prog_bar = Progbar(1500, stateful_metrics=["generator_loss", "discriminator_loss"])  # ~1,500 batches per epoch assuming BATCH_SIZE = 32
if ckpt_manager.latest_checkpoint:
checkpoint.restore(ckpt_manager.latest_checkpoint).expect_partial()
print(f"Restored the training checkpoint...{ckpt_manager.latest_checkpoint}")
def train():
for epoch in range(config.EPOCHS):
print(f"\nEpoch {epoch+1}/{config.EPOCHS} :")
for n, (image, label) in enumerate(train_dataset):
train_step(image, label, epoch+1)
prog_bar.update(n)
if (epoch+1) % 5 == 0:
ckpt_manager.save()
def generate():
z = tf.random.normal((10, config.NOISE_DIM))
indices = np.arange(0, 10)
labels = tf.one_hot(indices, depth=10)
print(labels)
out = gen([z, labels])
out = (out.numpy() * 127.5) + 127.5 # de-process
for i in range(10):
plt.subplot(1, 10, i + 1)
plt.axis("off")
plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')
plt.show()
if __name__ == "__main__":
train() # train loop
'''Test Code'''
# gen_out = gen([tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM)),
# tf.ones((config.BATCH_SIZE, 10))])
# disc_out = disc([tf.random.normal((config.BATCH_SIZE,) + config.IMAGE_SHAPE),
# tf.ones((config.BATCH_SIZE, 10))])
#
# assert gen_out.shape == (32, 28, 28, 1)
|
normal
|
{
"blob_id": "e265b2b2ccc0841ccb8b766de4ae2a869f2d280d",
"index": 8326,
"step-1": "<mask token>\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n <mask token>\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\[email protected](input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n 
indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits),\n fake_logits)\n return loss\n\n\n<mask token>\n\n\[email protected](input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n 
tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7 * 7 * 128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding='same')\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2,\n padding='same')\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2,\n padding='same')\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\n 'tanh', padding='same')\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\n 'same')\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding='same')\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7 * 7 * 16)\n\n def call(self, inputs, training=None, mask=None):\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n return x\n\n def get_config(self):\n return {'img_shape': self.img_shape, 'name': self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\n<mask token>\n\n\ndef disc_loss(real_logits, fake_logits):\n real_loss = tf.losses.BinaryCrossentropy()(tf.ones_like(real_logits),\n real_logits)\n fake_loss = tf.losses.BinaryCrossentropy()(tf.zeros_like(fake_logits),\n fake_logits)\n loss = 0.5 * (real_loss + fake_loss)\n return loss\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits),\n fake_logits)\n return loss\n\n\n<mask token>\n\n\[email protected](input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = 
tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n with summary_writer.as_default():\n tf.summary.scalar('generator_loss', g_loss, step=epoch)\n tf.summary.scalar('discriminator_loss', d_loss, step=epoch)\n\n\n<mask token>\n\n\ndef train():\n for epoch in range(config.EPOCHS):\n print(f'\\nEpoch {epoch + 1}/{config.EPOCHS} :')\n for n, (image, label) in enumerate(train_dataset):\n train_step(image, label, epoch + 1)\n prog_bar.update(n)\n if (epoch + 1) % 5 == 0:\n ckpt_manager.save()\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n out = gen([z, labels])\n out = out.numpy() * 127.5 + 127.5\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis('off')\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\n<mask token>\n",
"step-5": "import tensorflow as tf\nfrom keras import layers, Model, Input\nfrom keras.utils import Progbar, to_categorical\nfrom keras.datasets.mnist import load_data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport config\nimport datetime\n\nimg_height, img_width, _ = config.IMAGE_SHAPE\n\n(X, Y), (_, _) = load_data()\nX = X.reshape((-1, img_height, img_width, 1))\nX = X.astype(\"float32\")\nY = to_categorical(Y, num_classes=10, dtype=\"float32\")\n\n\ndef preprocess(img, lbl):\n img = (img - 127.5) / 127.5\n img = tf.convert_to_tensor(img, dtype=tf.float32)\n return img, lbl\n\n\nclass Generator(Model):\n def __init__(self, name):\n super(Generator, self).__init__(name=name)\n self.dense = layers.Dense(7*7*128)\n self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding=\"same\")\n self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding=\"same\")\n self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2, padding=\"same\")\n self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation=\"tanh\", padding=\"same\")\n self.relu = layers.ReLU()\n self.bn1 = layers.BatchNormalization()\n self.bn2 = layers.BatchNormalization()\n self.bn3 = layers.BatchNormalization()\n self.bn4 = layers.BatchNormalization()\n\n def call(self, inputs, training=None, mask=None):\n\n noise, label = inputs\n x = layers.Concatenate()([noise, label])\n x = self.dense(x)\n x = layers.Reshape(target_shape=(7, 7, 128))(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv1(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv3(x)\n x = self.bn4(x)\n x = self.relu(x)\n x = self.conv4(x)\n\n return x\n\n def get_config(self):\n return {'name': self.name}\n\n\nclass Discriminator(Model):\n def __init__(self, name, img_shape=(28, 28, 1)):\n super(Discriminator, self).__init__(name=name)\n self.img_shape = img_shape\n self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)\n self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)\n self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding=\"same\")\n self.conv4 = layers.Conv2D(256, kernel_size=5, padding=\"same\")\n self.leaky_relu = layers.LeakyReLU(alpha=0.2)\n self.flatten = layers.Flatten()\n self.dense_final = layers.Dense(1, activation='sigmoid')\n self.dense = layers.Dense(7*7*16)\n\n def call(self, inputs, training=None, mask=None):\n\n image, label = inputs\n lb = self.dense(label)\n lb = layers.Reshape(target_shape=(28, 28, 1))(lb)\n x = layers.Concatenate()([image, lb])\n x = self.leaky_relu(x)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n x = self.conv4(x)\n x = self.flatten(x)\n x = self.dense_final(x)\n\n return x\n\n def get_config(self):\n return {\"img_shape\": self.img_shape, \"name\": self.name}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\ngen = Generator(name=\"generator\")\ndisc = Discriminator(name=\"discriminator\", img_shape=config.IMAGE_SHAPE)\n\ngen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)\ndisc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)\n\ndataset = tf.data.Dataset.from_tensor_slices((X, Y))\ntrain_dataset = dataset.take(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)\nval_dataset = dataset.skip(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)\n\ncheckpoint = tf.train.Checkpoint(generator=gen,\n 
gen_optimizer=gen_optimizer,\n discriminator=disc,\n disc_optimizer=disc_optimizer)\nckpt_manager = tf.train.CheckpointManager(checkpoint, directory=config.CKPT_DIR, max_to_keep=3)\n\n# creates a summary writer, writes a summary in a file to access on tensorboard later\nsummary_writer = tf.summary.create_file_writer(\n logdir=config.LOG_DIR + \"fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n\n'''LOSSES'''\n\n\ndef disc_loss(real_logits, fake_logits):\n real_loss = tf.losses.BinaryCrossentropy()(tf.ones_like(real_logits), real_logits)\n fake_loss = tf.losses.BinaryCrossentropy()(tf.zeros_like(fake_logits), fake_logits)\n loss = 0.5*(real_loss + fake_loss)\n return loss\n\n\ndef gen_loss(fake_logits):\n loss = tf.losses.BinaryCrossentropy()(tf.ones_like(fake_logits), fake_logits)\n return loss\n\n\n# give signature to avoid retracing\n\nsignature = [\n tf.TensorSpec(shape=(None, 28, 28, 1), dtype=tf.float32),\n tf.TensorSpec(shape=(None, 10), dtype=tf.float32),\n tf.TensorSpec(shape=(), dtype=tf.int64)\n]\n\n\[email protected](input_signature=signature)\ndef train_step(image_batch, label_batch, epoch):\n noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))\n with tf.GradientTape(persistent=True) as tape:\n\n fake_img_batch = gen([noise, label_batch], training=True)\n fake_logits = disc([fake_img_batch, label_batch], training=True)\n real_logits = disc([image_batch, label_batch], training=True)\n\n d_loss = disc_loss(real_logits, fake_logits)\n g_loss = gen_loss(fake_logits)\n\n gen_grads = tape.gradient(g_loss, gen.trainable_variables)\n disc_grads = tape.gradient(d_loss, disc.trainable_variables)\n gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))\n disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))\n\n # writes a tensorboard summary (creates graph if scalar)\n with summary_writer.as_default():\n tf.summary.scalar(\"generator_loss\", g_loss, step=epoch)\n tf.summary.scalar(\"discriminator_loss\", d_loss, step=epoch)\n\n\ng_loss = tf.metrics.Mean()\nd_loss = tf.metrics.Mean()\nprog_bar = Progbar(1500, stateful_metrics=[g_loss, d_loss])\n\nif ckpt_manager.latest_checkpoint:\n checkpoint.restore(ckpt_manager.latest_checkpoint).expect_partial()\n print(f\"Restored the training checkpoint...{ckpt_manager.latest_checkpoint}\")\n\n\ndef train():\n for epoch in range(config.EPOCHS):\n print(f\"\\nEpoch {epoch+1}/{config.EPOCHS} :\")\n for n, (image, label) in enumerate(train_dataset):\n train_step(image, label, epoch+1)\n prog_bar.update(n)\n\n if (epoch+1) % 5 == 0:\n ckpt_manager.save()\n\n\ndef generate():\n z = tf.random.normal((10, config.NOISE_DIM))\n indices = np.arange(0, 10)\n labels = tf.one_hot(indices, depth=10)\n print(labels)\n\n out = gen([z, labels])\n out = (out.numpy() * 127.5) + 127.5 # de-process\n for i in range(10):\n plt.subplot(1, 10, i + 1)\n plt.axis(\"off\")\n plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')\n plt.show()\n\n\nif __name__ == \"__main__\":\n train() # train loop\n\n '''Test Code'''\n\n # gen_out = gen([tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM)),\n # tf.ones((config.BATCH_SIZE, 10))])\n # disc_out = disc([tf.random.normal((config.BATCH_SIZE,) + config.IMAGE_SHAPE),\n # tf.ones((config.BATCH_SIZE, 10))])\n #\n # assert gen_out.shape == (32, 28, 28, 1)\n\n\n\n\n\n\n\n",
"step-ids": [
8,
12,
13,
15,
19
]
}
|
[
8,
12,
13,
15,
19
] |
#Problem available at: https://www.hackerrank.com/challenges/weather-observation-station-6/problem
SELECT DISTINCT CITY from STATION where substr(CITY,1,1) in ('a','e','i','o','u');
|
normal
|
{
"blob_id": "1cba7889370cc7de47bb5cd1eaeadfece056e68a",
"index": 5912,
"step-1": "#Problem available at: https://www.hackerrank.com/challenges/weather-observation-station-6/problem\nSELECT DISTINCT CITY from STATION where substr(CITY,1,1) in ('a','e','i','o','u');",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
module rational number
"""
def _gcd(num_a, num_b):
"""
    greatest common divisor
"""
if num_a == 0 or num_b == 0:
raise ArithmeticError('gcd of zero')
var_p = num_a
var_q = num_b
if var_p < var_q:
var_p = num_b
var_q = num_a
var_r = var_p % var_q
while var_r != 0:
var_p = var_q
var_q = var_r
var_r = var_p % var_q
return var_q
class Rational(object):
"""
representing rational number
"""
def __init__(self, num, den):
"""
simple constructor
"""
if den == 0:
raise ZeroDivisionError('division by zero')
if num == 0:
self._num = 0
self._den = 1
else:
sign = 1
if num * den < 0:
sign = -1
abs_num = abs(num)
abs_den = abs(den)
divisor = _gcd(abs_num, abs_den)
self._num = sign * abs_num // divisor
self._den = abs_den // divisor
#
def __add__(self, other):
"""
'+' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return Rational(self.num + self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den + self.den * other.num, self.den * other.den)
def __radd__(self, other):
"""
fallback of '+' operator
"""
if isinstance(other, int):
return self.__add__(other)
return NotImplemented
#
def __sub__(self, other):
"""
'-' binary operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return Rational(self.num - self.den * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.den - self.den * other.num, self.den * other.den)
def __rsub__(self, other):
"""
fallback of '-' binary operator
"""
if isinstance(other, int):
return self.__neg__().__add__(- other)
return NotImplemented
#
def __mul__(self, other):
"""
'*' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return Rational(self.num * other, self.den)
if not isinstance(other, Rational):
return NotImplemented
return Rational(self.num * other.num, self.den * other.den)
def __rmul__(self, other):
"""
fallback of '*' operator
"""
return self.__mul__(other)
#
def __truediv__(self, other):
"""
'/' operator when '__future__.division' is in effect
"""
# supported type for operand except Rational
if isinstance(other, int):
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(self.num, self.den * other)
if not isinstance(other, Rational):
return NotImplemented
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(self.num * other.den, self.den * other.num)
def __rtruediv__(self, other):
"""
fallback of '/' operator when '__future__.division' is in effect
"""
if isinstance(other, int):
return Rational(self.den * other, self.num)
return NotImplemented
#
def __floordiv__(self, other):
"""
'//' operator
"""
return self.__truediv__(other)
def __rfloordiv__(self, other):
"""
fallback of '//' operator
"""
return self.__rtruediv__(other)
#
def __div__(self, other):
"""
'/' operator
"""
return self.__truediv__(other)
def __rdiv__(self, other):
"""
fallback of '/' operator
"""
return self.__rtruediv__(other)
#
def __mod__(self, other):
"""
'%' operator
"""
if isinstance(other, int):
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
if not isinstance(other, Rational):
return NotImplemented
if other == 0:
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
def __rmod__(self, other):
"""
fallback of '%' operator
"""
if self == Rational(0, 1):
raise ZeroDivisionError('division by zero')
return Rational(0, 1)
#
def __divmod__(self, other):
"""
'divmod()' operation
"""
quot = self.__floordiv__(other)
res = self.__mod__(other)
if quot != NotImplemented and res != NotImplemented:
return (quot, res)
return NotImplemented
def __rdivmod__(self, other):
"""
fallback of 'divmod()' operation
"""
quot = self.__rfloordiv__(other)
res = self.__rmod__(other)
if quot != NotImplemented and res != NotImplemented:
return (quot, res)
return NotImplemented
#
def __pos__(self):
"""
'+' unary operator
"""
return self
#
def __neg__(self):
"""
'-' unary operator
"""
return Rational(-1 * self.num, self.den)
#
def __abs__(self):
"""
absolute value
"""
return Rational(abs(self.num), self.den)
#
# "rich comparison" method
def __lt__(self, other):
"""
'<' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den < 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den < 0
#
def __le__(self, other):
"""
'<=' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den <= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den <= 0
#
def __eq__(self, other):
"""
'==' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den == 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den == 0
#
def __ne__(self, other):
"""
'!=' or '<>' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den != 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den != 0
#
def __gt__(self, other):
"""
'>' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den > 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den > 0
#
def __ge__(self, other):
"""
'>=' operator
"""
# supported type for operand except Rational
if isinstance(other, int):
return self.num - other * self.den >= 0
if not isinstance(other, Rational):
return NotImplemented
return self.num * other.den - other.num * self.den >= 0
#
def __hash__(self):
"""
calc hash value
"""
return hash((self.num, self.den))
#
def __repr__(self):
"""
'official' string representation
"""
return '<Rational: num=%d, den=%d>' % (self.num, self.den)
#
def __str__(self):
"""
'informal' string representation
"""
ret = str(self.num)
if self.den != 1:
ret += '/' + str(self.den)
return ret
#
def __bytes__(self):
"""
'bytes()' operation
"""
return bytes(str(self), 'utf8')
#
def __bool__(self):
"""
'bool()' operation
"""
return self.num != 0
#
def isinteger(self):
"""
Does this Rational instance represent integer?
"""
return self.den == 1
#
    # exposed as properties so that the self.num / self.den reads used
    # throughout the class resolve to the reduced _num / _den values
    @property
    def num(self):
        """
        returns numerator of Rational
        """
        return self._num
    #
    @property
    def den(self):
        """
        returns denominator of Rational
        """
        return self._den
#
@staticmethod
def parse(string):
"""
parse string to Rational
"""
posslash = string.find('/')
if posslash < 0:
return Rational(int(string), 1)
else:
strs = string.split('/')
return Rational(int(strs[0].strip()), int(strs[1].strip()))
#
ZERO = None
ONE = None
Rational.ZERO = Rational(0, 1)
Rational.ONE = Rational(1, 1)
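
# Illustrative usage sketch, not part of the original module; the values in
# the comments follow directly from the reduction and arithmetic defined above.
if __name__ == '__main__':
    half = Rational(2, 4)               # reduced by _gcd to 1/2
    third = Rational(1, 3)
    print(half)                         # 1/2
    print(half + third)                 # 5/6
    print(half * third)                 # 1/6
    print(Rational.parse('3/4'))        # 3/4
    print(Rational(4, 2).isinteger())   # True (reduces to 2/1)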
|
normal
|
{
"blob_id": "b1ab28a99fdcce66f0a1e4e25821073673f531cf",
"index": 657,
"step-1": "<mask token>\n\n\nclass Rational(object):\n <mask token>\n\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.\n den * other.den)\n <mask token>\n\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.\n den * other.den)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n <mask token>\n\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return Rational(-1 * self.num, self.den)\n\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n <mask token>\n <mask token>\n\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n <mask token>\n\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n <mask token>\n <mask token>\n 
<mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Rational(object):\n <mask token>\n\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.\n den * other.den)\n\n def __radd__(self, other):\n \"\"\"\n fallback of '+' operator\n \"\"\"\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.\n den * other.den)\n\n def __rsub__(self, other):\n \"\"\"\n fallback of '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return self.__neg__().__add__(-other)\n return NotImplemented\n\n def __mul__(self, other):\n \"\"\"\n '*' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.num, self.den * other.den)\n\n def __rmul__(self, other):\n \"\"\"\n fallback of '*' operator\n \"\"\"\n return self.__mul__(other)\n <mask token>\n\n def __rtruediv__(self, other):\n \"\"\"\n fallback of '/' operator when '__future__.division' is in effect\n \"\"\"\n if isinstance(other, int):\n return Rational(self.den * other, self.num)\n return NotImplemented\n <mask token>\n\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rdiv__(self, other):\n \"\"\"\n fallback of '/' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return Rational(-1 * self.num, self.den)\n\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n 
return Rational(abs(self.num), self.den)\n\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n\n def __le__(self, other):\n \"\"\"\n '<=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den <= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den <= 0\n\n def __eq__(self, other):\n \"\"\"\n '==' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den == 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den == 0\n\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n\n def __gt__(self, other):\n \"\"\"\n '>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den > 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den > 0\n\n def __ge__(self, other):\n \"\"\"\n '>=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den >= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den >= 0\n <mask token>\n\n def __repr__(self):\n \"\"\"\n 'official' string representation\n \"\"\"\n return '<Rational: num=%d, den=%d>' % (self.num, self.den)\n\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n <mask token>\n\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n <mask token>\n\n def num(self):\n \"\"\"\n returns numerator of Rational\n \"\"\"\n return self.num\n\n def den(self):\n \"\"\"\n returns denominator of Rational\n \"\"\"\n return self.den\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Rational(object):\n <mask token>\n\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.\n den * other.den)\n\n def __radd__(self, other):\n \"\"\"\n fallback of '+' operator\n \"\"\"\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.\n den * other.den)\n\n def __rsub__(self, other):\n \"\"\"\n fallback of '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return self.__neg__().__add__(-other)\n return NotImplemented\n\n def __mul__(self, other):\n \"\"\"\n '*' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.num, self.den * other.den)\n\n def __rmul__(self, other):\n \"\"\"\n fallback of '*' operator\n \"\"\"\n return self.__mul__(other)\n <mask token>\n\n def __rtruediv__(self, other):\n \"\"\"\n fallback of '/' operator when '__future__.division' is in effect\n \"\"\"\n if isinstance(other, int):\n return Rational(self.den * other, self.num)\n return NotImplemented\n\n def __floordiv__(self, other):\n \"\"\"\n '//' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rdiv__(self, other):\n \"\"\"\n fallback of '/' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return 
Rational(-1 * self.num, self.den)\n\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n\n def __le__(self, other):\n \"\"\"\n '<=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den <= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den <= 0\n\n def __eq__(self, other):\n \"\"\"\n '==' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den == 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den == 0\n\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n\n def __gt__(self, other):\n \"\"\"\n '>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den > 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den > 0\n\n def __ge__(self, other):\n \"\"\"\n '>=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den >= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den >= 0\n\n def __hash__(self):\n \"\"\"\n calc hash value\n \"\"\"\n return hash((self.num, self.den))\n\n def __repr__(self):\n \"\"\"\n 'official' string representation\n \"\"\"\n return '<Rational: num=%d, den=%d>' % (self.num, self.den)\n\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n\n def __bytes__(self):\n \"\"\"\n 'bytes()' operation\n \"\"\"\n return bytes(str(self), 'utf8')\n\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n <mask token>\n\n def num(self):\n \"\"\"\n returns numerator of Rational\n \"\"\"\n return self.num\n\n def den(self):\n \"\"\"\n returns denominator of Rational\n \"\"\"\n return self.den\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Rational(object):\n <mask token>\n\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.\n den * other.den)\n\n def __radd__(self, other):\n \"\"\"\n fallback of '+' operator\n \"\"\"\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.\n den * other.den)\n\n def __rsub__(self, other):\n \"\"\"\n fallback of '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return self.__neg__().__add__(-other)\n return NotImplemented\n\n def __mul__(self, other):\n \"\"\"\n '*' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.num, self.den * other.den)\n\n def __rmul__(self, other):\n \"\"\"\n fallback of '*' operator\n \"\"\"\n return self.__mul__(other)\n <mask token>\n\n def __rtruediv__(self, other):\n \"\"\"\n fallback of '/' operator when '__future__.division' is in effect\n \"\"\"\n if isinstance(other, int):\n return Rational(self.den * other, self.num)\n return NotImplemented\n\n def __floordiv__(self, other):\n \"\"\"\n '//' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rdiv__(self, other):\n \"\"\"\n fallback of '/' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return 
Rational(-1 * self.num, self.den)\n\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n\n def __le__(self, other):\n \"\"\"\n '<=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den <= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den <= 0\n\n def __eq__(self, other):\n \"\"\"\n '==' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den == 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den == 0\n\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n\n def __gt__(self, other):\n \"\"\"\n '>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den > 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den > 0\n\n def __ge__(self, other):\n \"\"\"\n '>=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den >= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den >= 0\n\n def __hash__(self):\n \"\"\"\n calc hash value\n \"\"\"\n return hash((self.num, self.den))\n\n def __repr__(self):\n \"\"\"\n 'official' string representation\n \"\"\"\n return '<Rational: num=%d, den=%d>' % (self.num, self.den)\n\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n\n def __bytes__(self):\n \"\"\"\n 'bytes()' operation\n \"\"\"\n return bytes(str(self), 'utf8')\n\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n\n def isinteger(self):\n \"\"\"\n Does this Rational instance represent integer?\n \"\"\"\n return self.den == 1\n\n def num(self):\n \"\"\"\n returns numerator of Rational\n \"\"\"\n return self.num\n\n def den(self):\n \"\"\"\n returns denominator of Rational\n \"\"\"\n return self.den\n\n @staticmethod\n def parse(string):\n \"\"\"\n parse string to Rational\n \"\"\"\n posslash = string.find('/')\n if posslash < 0:\n return Rational(int(string), 1)\n else:\n strs = string.split('/')\n return Rational(int(strs[0].strip()), int(strs[1].strip()))\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-5": "\"\"\"\nmodule rational number\n\"\"\"\n\ndef _gcd(num_a, num_b):\n \"\"\"\n gratest common divisor\n \"\"\"\n if num_a == 0 or num_b == 0:\n raise ArithmeticError('gcd of zero')\n var_p = num_a\n var_q = num_b\n if var_p < var_q:\n var_p = num_b\n var_q = num_a\n var_r = var_p % var_q\n while var_r != 0:\n var_p = var_q\n var_q = var_r\n var_r = var_p % var_q\n return var_q\n\nclass Rational(object):\n \"\"\"\n representing rational number\n \"\"\"\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n #\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.den * other.den)\n def __radd__(self, other):\n \"\"\"\n fallback of '+' operator\n \"\"\"\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented\n #\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.den * other.den)\n def __rsub__(self, other):\n \"\"\"\n fallback of '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return self.__neg__().__add__(- other)\n return NotImplemented\n #\n def __mul__(self, other):\n \"\"\"\n '*' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return Rational(self.num * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.num, self.den * other.den)\n def __rmul__(self, other):\n \"\"\"\n fallback of '*' operator\n \"\"\"\n return self.__mul__(other)\n #\n def __truediv__(self, other):\n \"\"\"\n '/' operator when '__future__.division' is in effect\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(self.num, self.den * other)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(self.num * other.den, self.den * other.num)\n def __rtruediv__(self, other):\n \"\"\"\n fallback of '/' operator when '__future__.division' is in effect\n \"\"\"\n if isinstance(other, int):\n return Rational(self.den * other, self.num)\n return NotImplemented\n #\n def __floordiv__(self, other):\n \"\"\"\n '//' operator\n \"\"\"\n return self.__truediv__(other)\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n #\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n def __rdiv__(self, other):\n \"\"\"\n fallback of '/' operator\n \"\"\"\n return self.__rtruediv__(other)\n #\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise 
ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n #\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return (quot, res)\n return NotImplemented\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return (quot, res)\n return NotImplemented\n #\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n #\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return Rational(-1 * self.num, self.den)\n #\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n #\n # \"rich comparison\" method\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n #\n def __le__(self, other):\n \"\"\"\n '<=' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den <= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den <= 0\n #\n def __eq__(self, other):\n \"\"\"\n '==' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den == 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den == 0\n #\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n #\n def __gt__(self, other):\n \"\"\"\n '>' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den > 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den > 0\n #\n def __ge__(self, other):\n \"\"\"\n '>=' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den >= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den >= 0\n #\n def __hash__(self):\n \"\"\"\n calc hash value\n \"\"\"\n return hash((self.num, self.den))\n #\n def __repr__(self):\n \"\"\"\n 'official' string representation\n \"\"\"\n return '<Rational: num=%d, den=%d>' % (self.num, self.den)\n #\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n #\n def __bytes__(self):\n \"\"\"\n 'bytes()' operation\n \"\"\"\n return bytes(str(self), 'utf8')\n #\n def __bool__(self):\n \"\"\"\n 
'bool()' operation\n \"\"\"\n return self.num != 0\n #\n def isinteger(self):\n \"\"\"\n Does this Rational instance represent integer?\n \"\"\"\n return self.den == 1\n #\n def num(self):\n \"\"\"\n returns numerator of Rational\n \"\"\"\n return self.num\n #\n def den(self):\n \"\"\"\n returns denominator of Rational\n \"\"\"\n return self.den\n #\n @staticmethod\n def parse(string):\n \"\"\"\n parse string to Rational\n \"\"\"\n posslash = string.find('/')\n if posslash < 0:\n return Rational(int(string), 1)\n else:\n strs = string.split('/')\n return Rational(int(strs[0].strip()), int(strs[1].strip()))\n #\n ZERO = None\n ONE = None\n\nRational.ZERO = Rational(0, 1)\nRational.ONE = Rational(1, 1)\n",
"step-ids": [
17,
30,
33,
35,
41
]
}
|
[
17,
30,
33,
35,
41
] |
from SpritesClass import Sprite
from JogadorClass import Jogador
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
class Tela:
def __init__(self,j,t0):
        self.telas = ["jogo","game over"] # existing screens
self.estagio = "jogo"
self.j = j
#sprites
self.v0 = Sprite(40,40,30,30,t0)
self.v1 = Sprite(40,80,30,30,t0)
self.v2 = Sprite(40,120,30,30,t0)
self.sprites = [self.v0,self.v1,self.v2]
def getEstagio(self):
return self.estagio
def setEstagio(self,temp):
if temp in self.telas:
self.estagio=temp
else:
            print("Screen does not exist, typo in the code")
def getSprites(self):
return self.sprites
    def atualizarSprites(self):
        # remove one life sprite per life lost; the membership checks keep a
        # repeated call from raising ValueError once the sprite is already gone
        if self.j.getVidas() == 2 and self.v2 in self.sprites:
            self.sprites.remove(self.v2)
        if self.j.getVidas() == 1 and self.v1 in self.sprites:
            self.sprites.remove(self.v1)
        if self.j.getVidas() == 0 and self.v0 in self.sprites:
            self.sprites.remove(self.v0)
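
# Illustrative usage sketch; the Jogador constructor arguments and the texture
# id below are assumptions, not taken from this file:
#   jogador = Jogador(...)              # hypothetical player instance
#   tela = Tela(jogador, textura_vida)  # textura_vida: an already loaded GL texture id
#   tela.setEstagio("game over")        # only names listed in self.telas are accepted
#   tela.atualizarSprites()             # drops one life sprite per life lost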
|
normal
|
{
"blob_id": "d1f0baa1ff87ece50aaded5e60908269e81b6734",
"index": 1952,
"step-1": "<mask token>\n\n\nclass Tela:\n <mask token>\n <mask token>\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n <mask token>\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-2": "<mask token>\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n <mask token>\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-3": "<mask token>\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-4": "from SpritesClass import Sprite\nfrom JogadorClass import Jogador\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\n\nclass Tela:\n\n def __init__(self, j, t0):\n self.telas = ['jogo', 'game over']\n self.estagio = 'jogo'\n self.j = j\n self.v0 = Sprite(40, 40, 30, 30, t0)\n self.v1 = Sprite(40, 80, 30, 30, t0)\n self.v2 = Sprite(40, 120, 30, 30, t0)\n self.sprites = [self.v0, self.v1, self.v2]\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self, temp):\n if temp in self.telas:\n self.estagio = temp\n else:\n print('Tela não existe, erro de digitação no código')\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)\n",
"step-5": "from SpritesClass import Sprite\nfrom JogadorClass import Jogador\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\nclass Tela:\n def __init__(self,j,t0):\n self.telas = [\"jogo\",\"game over\"] #telas existentes\n self.estagio = \"jogo\"\n self.j = j\n\n #sprites\n self.v0 = Sprite(40,40,30,30,t0)\n self.v1 = Sprite(40,80,30,30,t0)\n self.v2 = Sprite(40,120,30,30,t0)\n self.sprites = [self.v0,self.v1,self.v2]\n\n\n def getEstagio(self):\n return self.estagio\n\n def setEstagio(self,temp):\n if temp in self.telas:\n self.estagio=temp\n else:\n print(\"Tela não existe, erro de digitação no código\")\n\n def getSprites(self):\n return self.sprites\n\n def atualizarSprites(self):\n if self.j.getVidas() == 2:\n self.sprites.remove(self.v2)\n if self.j.getVidas() == 1:\n self.sprites.remove(self.v1)\n if self.j.getVidas() == 0:\n self.sprites.remove(self.v0)",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
"""
* @section LICENSE
*
* @copyright
* Copyright (c) 2017 Intel Corporation
*
* @copyright
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* @copyright
* http://www.apache.org/licenses/LICENSE-2.0
*
* @copyright
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
"""
from re import match
from os import environ
import sys
from cts_core.commons.error import cts_error
from cts_core.commons.replay_controller import ReplayController
from cts_framework.actions.action import Action
from cts_framework.actions.execute.execute_test_scripts_action import ExecuteTestScriptsAction
from cts_framework.build_information import BuildInformation
from cts_framework.commons.color_printer import ColorPrinter
from cts_framework.commons.logging_helper import LoggingHelper
from cts_framework.db.dao.script_dao import ScriptDAO
from cts_framework.tests_managing.test_package.tests_packages_container import TestsPackagesContainer
from cts_framework.tests_managing.tests_manager import TestsManager
from cts_framework.tests_running.execution_feed import ExecutionFeed
def split_replay_id(replay_id):
"""converts replay_id provided by the user into script execution id
:type replay_id: str
:rtype: (Boolean, int)
"""
m = match(r"^(\d+)$", replay_id.strip())
if m:
return None, int(m.groups()[0])
cts_error("Replay id has invalid format. Expected: unsigned integer")
return True, None
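
# Illustrative note: split_replay_id returns an (error, script_execution_id)
# pair, e.g. split_replay_id("12") -> (None, 12), while a malformed value such
# as "12a" reports a CTS error and yields (True, None).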
class ReplayTestRunAction(Action):
ACTION = "replay"
PARAM_NAME = "ACTION"
def __init__(self, *params, **kwargs):
Action.__init__(self, *params, **kwargs)
self._logger = LoggingHelper(__name__)
def fill_parser_arguments(self):
self.parser.add_argument("replay_id", help="ID of the test script run to replay", type=str, nargs=1)
def process_action(self, configuration):
replay_id = configuration.replay_id[0]
print "Using CTS in version %s to replay execution %s" \
% (ColorPrinter.format_text(BuildInformation.BUILD_VERSION, bold=True), replay_id)
error, script_execution_id = split_replay_id(replay_id)
if error:
return
# TODO: warn user when he tries to replay using newer CTS
script_execution = ScriptDAO.get_script_execution_details(script_execution_id)
if script_execution is None:
cts_error("Recording for script execution id={id:ignore} not found", id=script_execution_id)
return
script_path = script_execution.script_path
configuration = self._configuration_from_string(script_execution.configuration)
test_plan = self._prepare_test_plan(script_path)
environ[ReplayController.CTS_REPLAY_SCRIPT_EXECUTION_ID] = str(script_execution_id)
self._execute(configuration, test_plan)
def _configuration_from_string(self, configuration_str):
configuration = {b[0]: b[1] for b in
(a.strip().split(' ', 1) for a in filter(None, configuration_str.split('--')))}
return configuration
def _prepare_test_plan(self, script_path):
test_plan = TestsPackagesContainer()
tests_manager = TestsManager()
test_scripts_found = tests_manager.get_packages()
test_scripts_found.filter(script_paths=[script_path], remove_empty=True)
test_plan += test_scripts_found
if not test_plan.packages:
print "Script to execute not found in any package"
sys.exit(0)
return test_plan
def _execute(self, configuration, test_plan):
"""
:type configuration: dict
:type test_plan: cts_framework.tests_managing.test_package.tests_packages_container.TestsPackagesContainer
"""
message = "Executing "
print "Executing:"
for package in test_plan.packages:
for suite in package.suites:
for script in suite.scripts:
print "\t* %s from suite %s from package %s" % (script.name, suite.name, package.name)
message += "%s from suite %s from package %s, " % (script.name, suite.name, package.name)
self._logger.log_debug(message)
execution_feed = ExecutionFeed(test_plan, configuration)
ExecuteTestScriptsAction.execute_configuration_group(execution_feed)
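# --------------------------------------------------------------------------
# Illustrative sketch (separate from the classes above): the nested
# comprehension in _configuration_from_string is terse, so this standalone
# snippet applies the same parsing step to a sample string. The flag names
# and URL are assumptions made up purely for illustration.
if __name__ == "__main__":
    sample = "--ApiEndpoint https://localhost:8443/redfish/v1 --UseSSL True"
    parsed = {b[0]: b[1] for b in
              (a.strip().split(' ', 1) for a in filter(None, sample.split('--')))}
    # parsed == {'ApiEndpoint': 'https://localhost:8443/redfish/v1', 'UseSSL': 'True'}
    print(parsed)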
|
normal
|
{
"blob_id": "f11e6a53d8dfc60f73f346772df7a3cab14088ce",
"index": 2751,
"step-1": "\"\"\"\n * @section LICENSE\n *\n * @copyright\n * Copyright (c) 2017 Intel Corporation\n *\n * @copyright\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * @copyright\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * @copyright\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * @section DESCRIPTION\n\"\"\"\nfrom re import match\nfrom os import environ\nimport sys\n\nfrom cts_core.commons.error import cts_error\nfrom cts_core.commons.replay_controller import ReplayController\nfrom cts_framework.actions.action import Action\nfrom cts_framework.actions.execute.execute_test_scripts_action import ExecuteTestScriptsAction\nfrom cts_framework.build_information import BuildInformation\nfrom cts_framework.commons.color_printer import ColorPrinter\nfrom cts_framework.commons.logging_helper import LoggingHelper\nfrom cts_framework.db.dao.script_dao import ScriptDAO\nfrom cts_framework.tests_managing.test_package.tests_packages_container import TestsPackagesContainer\nfrom cts_framework.tests_managing.tests_manager import TestsManager\nfrom cts_framework.tests_running.execution_feed import ExecutionFeed\n\n\ndef split_replay_id(replay_id):\n \"\"\"converts replay_id provided by the user into script execution id\n :type replay_id: str\n :rtype: (Boolean, int)\n \"\"\"\n\n m = match(r\"^(\\d+)$\", replay_id.strip())\n if m:\n return None, int(m.groups()[0])\n\n cts_error(\"Replay id has invalid format. 
Expected: unsigned integer\")\n return True, None\n\n\nclass ReplayTestRunAction(Action):\n ACTION = \"replay\"\n PARAM_NAME = \"ACTION\"\n\n def __init__(self, *params, **kwargs):\n Action.__init__(self, *params, **kwargs)\n\n self._logger = LoggingHelper(__name__)\n\n def fill_parser_arguments(self):\n self.parser.add_argument(\"replay_id\", help=\"ID of the test script run to replay\", type=str, nargs=1)\n\n def process_action(self, configuration):\n replay_id = configuration.replay_id[0]\n print \"Using CTS in version %s to replay execution %s\" \\\n % (ColorPrinter.format_text(BuildInformation.BUILD_VERSION, bold=True), replay_id)\n\n error, script_execution_id = split_replay_id(replay_id)\n if error:\n return\n\n # TODO: warn user when he tries to replay using newer CTS\n\n script_execution = ScriptDAO.get_script_execution_details(script_execution_id)\n if script_execution is None:\n cts_error(\"Recording for script execution id={id:ignore} not found\", id=script_execution_id)\n return\n\n script_path = script_execution.script_path\n configuration = self._configuration_from_string(script_execution.configuration)\n\n test_plan = self._prepare_test_plan(script_path)\n\n environ[ReplayController.CTS_REPLAY_SCRIPT_EXECUTION_ID] = str(script_execution_id)\n self._execute(configuration, test_plan)\n\n def _configuration_from_string(self, configuration_str):\n configuration = {b[0]: b[1] for b in\n (a.strip().split(' ', 1) for a in filter(None, configuration_str.split('--')))}\n return configuration\n\n def _prepare_test_plan(self, script_path):\n test_plan = TestsPackagesContainer()\n tests_manager = TestsManager()\n test_scripts_found = tests_manager.get_packages()\n test_scripts_found.filter(script_paths=[script_path], remove_empty=True)\n test_plan += test_scripts_found\n if not test_plan.packages:\n print \"Script to execute not found in any package\"\n sys.exit(0)\n return test_plan\n\n def _execute(self, configuration, test_plan):\n \"\"\"\n :type configuration: dict\n :type test_plan: cts_framework.tests_managing.test_package.tests_packages_container.TestsPackagesContainer\n \"\"\"\n message = \"Executing \"\n print \"Executing:\"\n for package in test_plan.packages:\n for suite in package.suites:\n for script in suite.scripts:\n print \"\\t* %s from suite %s from package %s\" % (script.name, suite.name, package.name)\n message += \"%s from suite %s from package %s, \" % (script.name, suite.name, package.name)\n self._logger.log_debug(message)\n execution_feed = ExecutionFeed(test_plan, configuration)\n ExecuteTestScriptsAction.execute_configuration_group(execution_feed)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
points_dict = {
'+': 5,
'-': 4,
'*': 3,
'/': 2,
'(': -1,
}
op_list = ['+','-','*','/']
def fitness(x1,op,x2):
    # Returns the score for extending the expression with the given operator and the next number
try:
hasil = eval(f"{x1} {op} {x2}")
diff = points_dict[op] - abs(24-hasil)
if (abs(24-hasil) == 0):
return diff+10
else:
return diff
except ZeroDivisionError:
return float("-inf")
def calc_points(expr):
points = 0
hasil = eval(expr)
points -= abs(24-hasil)
for c in expr:
points += points_dict.get(c,0)
return points
def solve(bil):
bil.sort(reverse=True)
expr = str(bil[0])
bil = bil[1:]
for _ in range(3):
b_max_fitness = float("-Inf")
for b in bil:
for op in op_list:
curr_fitness = fitness(expr,op,b)
if curr_fitness > b_max_fitness:
b_max_fitness = curr_fitness
curr_op_max = op
curr_b_max = b
expr += f" {curr_op_max} {curr_b_max}"
bil.remove(curr_b_max)
points = calc_points(expr)
# print(f"{expr} ~ Points: {points}")
return (expr,points)
def main():
# bil = [int(c) for c in input("Masukkan 4 angka dipisahkan spasi:").strip().split()]
points = 0
solves = []
for a in range(1,14):
for b in range(1,14):
for c in range(1,14):
for d in range(1,14):
bil = [a,b,c,d]
expre,point = solve(bil)
                    if expre not in (s[0] for s in solves):  # dedupe on the expression string
solves.append((expre,point))
points += point
print(f"{(a-1)*13*13*13+(b-1)*13*13+(c-1)*13+d} : {expre}")
avg_points = points/(13**4)
print(f"Average points : {avg_points}")
count24 = 0
for expr in solves:
res = eval(expr[0])
if res==24:
count24 += 1
print(f"24 Count : {count24}")
if __name__ == "__main__":
main()
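# --------------------------------------------------------------------------
# Illustrative sketch: one worked hand makes the greedy strategy above easier
# to follow. The hand [13, 2, 1, 1] is an arbitrary example; calling
# demo_single_hand() (e.g. from `python -i`) reproduces it without running
# the full 13^4 sweep in main().
def demo_single_hand():
    expr, points = solve([13, 2, 1, 1])
    # Greedy build-up: '13' -> '13 * 2' -> '13 * 2 - 1' -> '13 * 2 - 1 - 1'
    # eval(expr) == 24, so no distance penalty; operator bonuses 3 + 4 + 4 = 11
    print(f"{expr} = {eval(expr)} (points: {points})")
    return expr, points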
|
normal
|
{
"blob_id": "c420fb855fbf5691798eadca476b6eccec4aee57",
"index": 7409,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc_points(expr):\n points = 0\n hasil = eval(expr)\n points -= abs(24 - hasil)\n for c in expr:\n points += points_dict.get(c, 0)\n return points\n\n\ndef solve(bil):\n bil.sort(reverse=True)\n expr = str(bil[0])\n bil = bil[1:]\n for _ in range(3):\n b_max_fitness = float('-Inf')\n for b in bil:\n for op in op_list:\n curr_fitness = fitness(expr, op, b)\n if curr_fitness > b_max_fitness:\n b_max_fitness = curr_fitness\n curr_op_max = op\n curr_b_max = b\n expr += f' {curr_op_max} {curr_b_max}'\n bil.remove(curr_b_max)\n points = calc_points(expr)\n return expr, points\n\n\ndef main():\n points = 0\n solves = []\n for a in range(1, 14):\n for b in range(1, 14):\n for c in range(1, 14):\n for d in range(1, 14):\n bil = [a, b, c, d]\n expre, point = solve(bil)\n if expre not in solves:\n solves.append((expre, point))\n points += point\n print(\n f'{(a - 1) * 13 * 13 * 13 + (b - 1) * 13 * 13 + (c - 1) * 13 + d} : {expre}'\n )\n avg_points = points / 13 ** 4\n print(f'Average points : {avg_points}')\n count24 = 0\n for expr in solves:\n res = eval(expr[0])\n if res == 24:\n count24 += 1\n print(f'24 Count : {count24}')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fitness(x1, op, x2):\n try:\n hasil = eval(f'{x1} {op} {x2}')\n diff = points_dict[op] - abs(24 - hasil)\n if abs(24 - hasil) == 0:\n return diff + 10\n else:\n return diff\n except ZeroDivisionError:\n return float('-inf')\n\n\ndef calc_points(expr):\n points = 0\n hasil = eval(expr)\n points -= abs(24 - hasil)\n for c in expr:\n points += points_dict.get(c, 0)\n return points\n\n\ndef solve(bil):\n bil.sort(reverse=True)\n expr = str(bil[0])\n bil = bil[1:]\n for _ in range(3):\n b_max_fitness = float('-Inf')\n for b in bil:\n for op in op_list:\n curr_fitness = fitness(expr, op, b)\n if curr_fitness > b_max_fitness:\n b_max_fitness = curr_fitness\n curr_op_max = op\n curr_b_max = b\n expr += f' {curr_op_max} {curr_b_max}'\n bil.remove(curr_b_max)\n points = calc_points(expr)\n return expr, points\n\n\ndef main():\n points = 0\n solves = []\n for a in range(1, 14):\n for b in range(1, 14):\n for c in range(1, 14):\n for d in range(1, 14):\n bil = [a, b, c, d]\n expre, point = solve(bil)\n if expre not in solves:\n solves.append((expre, point))\n points += point\n print(\n f'{(a - 1) * 13 * 13 * 13 + (b - 1) * 13 * 13 + (c - 1) * 13 + d} : {expre}'\n )\n avg_points = points / 13 ** 4\n print(f'Average points : {avg_points}')\n count24 = 0\n for expr in solves:\n res = eval(expr[0])\n if res == 24:\n count24 += 1\n print(f'24 Count : {count24}')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "points_dict = {'+': 5, '-': 4, '*': 3, '/': 2, '(': -1}\nop_list = ['+', '-', '*', '/']\n\n\ndef fitness(x1, op, x2):\n try:\n hasil = eval(f'{x1} {op} {x2}')\n diff = points_dict[op] - abs(24 - hasil)\n if abs(24 - hasil) == 0:\n return diff + 10\n else:\n return diff\n except ZeroDivisionError:\n return float('-inf')\n\n\ndef calc_points(expr):\n points = 0\n hasil = eval(expr)\n points -= abs(24 - hasil)\n for c in expr:\n points += points_dict.get(c, 0)\n return points\n\n\ndef solve(bil):\n bil.sort(reverse=True)\n expr = str(bil[0])\n bil = bil[1:]\n for _ in range(3):\n b_max_fitness = float('-Inf')\n for b in bil:\n for op in op_list:\n curr_fitness = fitness(expr, op, b)\n if curr_fitness > b_max_fitness:\n b_max_fitness = curr_fitness\n curr_op_max = op\n curr_b_max = b\n expr += f' {curr_op_max} {curr_b_max}'\n bil.remove(curr_b_max)\n points = calc_points(expr)\n return expr, points\n\n\ndef main():\n points = 0\n solves = []\n for a in range(1, 14):\n for b in range(1, 14):\n for c in range(1, 14):\n for d in range(1, 14):\n bil = [a, b, c, d]\n expre, point = solve(bil)\n if expre not in solves:\n solves.append((expre, point))\n points += point\n print(\n f'{(a - 1) * 13 * 13 * 13 + (b - 1) * 13 * 13 + (c - 1) * 13 + d} : {expre}'\n )\n avg_points = points / 13 ** 4\n print(f'Average points : {avg_points}')\n count24 = 0\n for expr in solves:\n res = eval(expr[0])\n if res == 24:\n count24 += 1\n print(f'24 Count : {count24}')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "points_dict = {\n '+': 5,\n '-': 4,\n '*': 3,\n '/': 2,\n '(': -1,\n}\n\nop_list = ['+','-','*','/']\n\ndef fitness(x1,op,x2):\n #Mengembalikan point dari penyambungan expresi dengan operasi dan bilangan berikutnya\n try:\n hasil = eval(f\"{x1} {op} {x2}\")\n diff = points_dict[op] - abs(24-hasil)\n if (abs(24-hasil) == 0):\n return diff+10\n else:\n return diff\n except ZeroDivisionError:\n return float(\"-inf\")\n\ndef calc_points(expr):\n points = 0\n hasil = eval(expr)\n points -= abs(24-hasil)\n for c in expr:\n points += points_dict.get(c,0)\n return points\n\ndef solve(bil):\n bil.sort(reverse=True)\n\n expr = str(bil[0])\n bil = bil[1:]\n for _ in range(3):\n b_max_fitness = float(\"-Inf\")\n for b in bil:\n for op in op_list:\n curr_fitness = fitness(expr,op,b)\n if curr_fitness > b_max_fitness:\n b_max_fitness = curr_fitness\n curr_op_max = op\n curr_b_max = b\n expr += f\" {curr_op_max} {curr_b_max}\"\n bil.remove(curr_b_max)\n\n points = calc_points(expr)\n # print(f\"{expr} ~ Points: {points}\")\n return (expr,points)\n\ndef main():\n # bil = [int(c) for c in input(\"Masukkan 4 angka dipisahkan spasi:\").strip().split()]\n points = 0\n solves = []\n for a in range(1,14):\n for b in range(1,14):\n for c in range(1,14):\n for d in range(1,14):\n bil = [a,b,c,d]\n expre,point = solve(bil)\n if expre not in solves:\n solves.append((expre,point))\n points += point\n print(f\"{(a-1)*13*13*13+(b-1)*13*13+(c-1)*13+d} : {expre}\")\n\n avg_points = points/(13**4)\n print(f\"Average points : {avg_points}\")\n\n count24 = 0\n for expr in solves:\n res = eval(expr[0])\n if res==24:\n count24 += 1\n print(f\"24 Count : {count24}\")\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
3,
5,
6,
7
]
}
|
[
0,
3,
5,
6,
7
] |
import numpy as np
from scipy import fft
import math
from sklearn import svm
from activity_recognition import WiiGesture
class WiiGestureClassifier():
"""
This class uses the FFT on the average of all three sensor values
to provide the training data for the SVM
Three good distinguishable gestures are:
Fast circle movement
Still, doing nothing
Fast swing movement from behind the shoulder (like a whip)
"""
def __init__(self):
super(self.__class__, self).__init__()
def train(self, gestureList):
self.gestureList = gestureList
self.parsedGestureList = []
self.parseArrays(self.gestureList)
if self.checkListForEmpty():
return "\na gesture has no trained samples"
self.minlen = self.calcMinLength()
self.cutGestureList()
self.getFrequencies()
self.buildClassifier()
def parseArrays(self, data):
parsedData = []
for gesture in data:
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet)
for dataSet in gesture.trainingsData]
parsedGesture.trainingsData = parsedData
self.parsedGestureList.append(parsedGesture)
def parseDataset(self, dataSet):
x = []
y = []
z = []
avg = []
#Use the difference from default sensor value
for values in dataSet:
x.append(values[0]-512)
y.append(values[1]-512)
z.append(values[2]-512)
avg.append((values[0]-512 + values[1]-512 + values[2]-512) / 3)
return avg
def calcMinLength(self):
all = []
for gesture in self.parsedGestureList:
all += gesture.trainingsData
minlen = min([len(x) for x in all])
return minlen
def cutGestureList(self):
for gesture in self.parsedGestureList:
gesture.trainingsData = [l[:self.minlen] for l in gesture.trainingsData]
def getFrequencies(self):
for gesture in self.parsedGestureList:
gesture.frequencies = [
np.abs(fft(l) / len(l))[1:len(l) / 2] for l in gesture.trainingsData]
def buildClassifier(self):
self.c = svm.SVC()
count = 0
categories = []
trainingData = []
for gesture in self.parsedGestureList:
categories += [count] * len(gesture.frequencies)
trainingData += gesture.frequencies
count += 1
try:
self.c.fit(trainingData, categories)
except ValueError:
            return 'More training data for some gestures is required'
def classify(self, gesture):
parsedData = []
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture.trainingsData]
parsedGesture.trainingsData = parsedData
if len(parsedGesture.trainingsData[0]) < self.minlen:
missingValues = self.minlen - len(parsedGesture.trainingsData[0])
for x in range(missingValues):
parsedGesture.trainingsData[0].append(0)
parsedGesture.trainingsData = [l[:self.minlen] for l in parsedGesture.trainingsData]
parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for l in parsedGesture.trainingsData]
return self.c.predict(parsedGesture.frequencies[0])
def checkListForEmpty(self):
        # returns True when there are no gestures or a gesture has no training samples
if len(self.parsedGestureList) <= 0:
return True
for gesture in self.parsedGestureList:
if len(gesture.trainingsData) <= 0:
return True
else:
return False
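# --------------------------------------------------------------------------
# Illustrative sketch: the features fed to the SVM above are just the
# magnitudes of the first half of the FFT of the averaged accelerometer
# signal. This standalone helper shows that transformation on a synthetic
# signal; it uses numpy's fft (instead of the scipy import above) and a
# made-up sine wave purely so it runs on its own.
def _demo_fft_features():
    t = np.linspace(0, 1, 64)
    fake_avg_signal = 100 * np.sin(2 * np.pi * 5 * t)  # pretend 5 Hz "gesture"
    n = len(fake_avg_signal)
    features = np.abs(np.fft.fft(fake_avg_signal) / n)[1:n // 2]
    # 'features' is shaped like one entry of gesture.frequencies; the dominant
    # bin sits near the 5 Hz component of the fake signal.
    return features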
|
normal
|
{
"blob_id": "0b7bba826b82c3751c072395431e17bc1dc9bb90",
"index": 6037,
"step-1": "<mask token>\n\n\nclass WiiGestureClassifier:\n <mask token>\n\n def __init__(self):\n super(self.__class__, self).__init__()\n <mask token>\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture\n .trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n <mask token>\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.\n trainingsData]\n <mask token>\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.\n trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in\n parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass WiiGestureClassifier:\n <mask token>\n\n def __init__(self):\n super(self.__class__, self).__init__()\n <mask token>\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture\n .trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n <mask token>\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.\n trainingsData]\n <mask token>\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.\n trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in\n parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n\n def checkListForEmpty(self):\n if len(self.parsedGestureList) <= 0:\n return True\n for gesture in self.parsedGestureList:\n if len(gesture.trainingsData) <= 0:\n return True\n else:\n return False\n",
"step-3": "<mask token>\n\n\nclass WiiGestureClassifier:\n <mask token>\n\n def __init__(self):\n super(self.__class__, self).__init__()\n\n def train(self, gestureList):\n self.gestureList = gestureList\n self.parsedGestureList = []\n self.parseArrays(self.gestureList)\n if self.checkListForEmpty():\n return '\\na gesture has no trained samples'\n self.minlen = self.calcMinLength()\n self.cutGestureList()\n self.getFrequencies()\n self.buildClassifier()\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture\n .trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n <mask token>\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.\n trainingsData]\n\n def getFrequencies(self):\n for gesture in self.parsedGestureList:\n gesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in gesture.trainingsData]\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.\n trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in\n parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n\n def checkListForEmpty(self):\n if len(self.parsedGestureList) <= 0:\n return True\n for gesture in self.parsedGestureList:\n if len(gesture.trainingsData) <= 0:\n return True\n else:\n return False\n",
"step-4": "import numpy as np\nfrom scipy import fft\nimport math\nfrom sklearn import svm\nfrom activity_recognition import WiiGesture\n\n\nclass WiiGestureClassifier:\n \"\"\"\n This class uses the FFT on the average of all three sensor values\n to provide the training data for the SVM\n\n Three good distinguishable gestures are:\n Fast circle movement\n Still, doing nothing\n Fast swing movement from behind the shoulder (like a whip)\n \"\"\"\n\n def __init__(self):\n super(self.__class__, self).__init__()\n\n def train(self, gestureList):\n self.gestureList = gestureList\n self.parsedGestureList = []\n self.parseArrays(self.gestureList)\n if self.checkListForEmpty():\n return '\\na gesture has no trained samples'\n self.minlen = self.calcMinLength()\n self.cutGestureList()\n self.getFrequencies()\n self.buildClassifier()\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture\n .trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n\n def parseDataset(self, dataSet):\n x = []\n y = []\n z = []\n avg = []\n for values in dataSet:\n x.append(values[0] - 512)\n y.append(values[1] - 512)\n z.append(values[2] - 512)\n avg.append((values[0] - 512 + values[1] - 512 + values[2] - 512\n ) / 3)\n return avg\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.\n trainingsData]\n\n def getFrequencies(self):\n for gesture in self.parsedGestureList:\n gesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in gesture.trainingsData]\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.\n trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in\n parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n\n def checkListForEmpty(self):\n if len(self.parsedGestureList) <= 0:\n return True\n for gesture in self.parsedGestureList:\n if len(gesture.trainingsData) <= 0:\n return True\n else:\n return False\n",
"step-5": "import numpy as np\nfrom scipy import fft\nimport math\nfrom sklearn import svm\nfrom activity_recognition import WiiGesture\n\n\nclass WiiGestureClassifier():\n \"\"\"\n This class uses the FFT on the average of all three sensor values\n to provide the training data for the SVM\n\n Three good distinguishable gestures are:\n Fast circle movement\n Still, doing nothing\n Fast swing movement from behind the shoulder (like a whip)\n \"\"\"\n def __init__(self):\n super(self.__class__, self).__init__()\n\n def train(self, gestureList):\n self.gestureList = gestureList\n self.parsedGestureList = []\n self.parseArrays(self.gestureList)\n if self.checkListForEmpty():\n return \"\\na gesture has no trained samples\"\n self.minlen = self.calcMinLength()\n self.cutGestureList()\n self.getFrequencies()\n self.buildClassifier()\n\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet)\n for dataSet in gesture.trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n\n def parseDataset(self, dataSet):\n x = []\n y = []\n z = []\n avg = []\n #Use the difference from default sensor value\n for values in dataSet:\n x.append(values[0]-512)\n y.append(values[1]-512)\n z.append(values[2]-512)\n avg.append((values[0]-512 + values[1]-512 + values[2]-512) / 3)\n return avg\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.trainingsData]\n\n def getFrequencies(self):\n for gesture in self.parsedGestureList:\n gesture.frequencies = [\n np.abs(fft(l) / len(l))[1:len(l) / 2] for l in gesture.trainingsData]\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n\n def checkListForEmpty(self):\n #checks for empty gestures and exits code\n if len(self.parsedGestureList) <= 0:\n return True\n for gesture in self.parsedGestureList:\n if len(gesture.trainingsData) <= 0:\n return True\n else:\n return False",
"step-ids": [
7,
8,
10,
13,
14
]
}
|
[
7,
8,
10,
13,
14
] |
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.utils import IntegrityError
from django.shortcuts import redirect, render
from django.utils.translation import gettext_lazy as _
from keymanager.settings import PAGE_SIZE
from .forms import LoginForm
from .forms import UserCreateForm, UserEditForm
from utils.filters import require_superuser
LOG = logging.getLogger(__name__)
def require_superuser_or_self(func):
def check(request, user_id):
if request.user.is_superuser or \
user_id.encode("utf-8") == str(request.user.id):
return func(request, user_id)
return render(request, "403.html")
return check
@require_superuser
def index(request):
template_name = "users/index.html"
msg = ""
try:
users = User.objects.exclude(id=request.user.id)
except:
msg = _("Unable to list users.")
LOG.error(msg)
users = []
paginator = Paginator(users, PAGE_SIZE)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return render(request, template_name, {"users": users, "message": msg})
@require_superuser
def create(request):
template_name = "users/create_user.html"
msg = ""
user_form = UserCreateForm()
if request.method == "POST":
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
try:
new_user = User.objects.create_user(
request.POST['username'],
request.POST['email'],
request.POST['password'])
new_user.save()
msg = _('Success create user "%s"') % \
user_form.cleaned_data['username'].encode("utf-8")
LOG.info(msg)
except IntegrityError:
msg = _("User already exist, please try another username.")
LOG.error(msg)
except:
msg = _('Unable to create user "%s"') % \
user_form.cleaned_data['username'].encode("utf-8")
LOG.error(msg)
return render(request, template_name, {"user_form": user_form,
"message": msg})
@require_superuser
def delete(request, user_id):
try:
User.objects.get(id=user_id).delete()
except Exception:
msg = _("Unable to delete user(%s)") % user_id
LOG.error(msg)
    if str(user_id) == str(request.user.id):
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def deactivate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = False
user.save()
except:
msg = _("Unable to deactivate user(%s)") % user_id
LOG.error(msg)
    if str(user_id) == str(request.user.id):
logout(request)
return redirect(reverse('users:index'))
@require_superuser
def activate(request, user_id):
try:
user = User.objects.get(id=user_id)
user.is_active = True
user.save()
except:
msg = _("Unable to activate user(%s)") % user_id
LOG.error(msg)
    if str(user_id) == str(request.user.id):
logout(request)
return redirect(reverse('users:index'))
@require_superuser_or_self
def edit(request, user_id):
template_name = "users/update_user.html"
msg = ""
user = User.objects.get(id=user_id)
user_form = UserEditForm(initial={"username": user.username,
"email": user.email})
if request.method == "POST":
user_form = UserEditForm(request.POST)
if user_form.is_valid():
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
if username:
user.username = username
if email:
user.email = email
if password:
user.set_password(password)
user.save()
msg = _('Success updated user "%s"') % username.encode("utf-8")
LOG.info(msg)
return render(request, template_name, {"user_id": user_id,
"user_form": user_form,
"message": msg})
def login(request):
template_name = 'auth/login.html'
msg = ""
if request.user.is_authenticated():
return redirect(reverse("keys:index"))
form = LoginForm
if request.method == "POST":
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data["password"]
user = authenticate(username=username, password=password)
if user:
if user.is_active:
django_login(request, user)
msg = _("%s logged in successfully.") % \
username.encode('utf-8')
LOG.info(msg)
return redirect(reverse('keys:index'))
msg = _("Invalid username or password.")
LOG.error(msg)
return render(request, template_name, {"user_form": form,
"message": msg})
def logout(request):
django_logout(request)
return redirect(reverse("index"))
|
normal
|
{
"blob_id": "b739a5d359b4d1c0323c7cd8234e4fe5eb9f3fcb",
"index": 6286,
"step-1": "<mask token>\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in 
successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-2": "<mask token>\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n 
username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-3": "<mask token>\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n 
if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-4": "import logging\nfrom django.contrib.auth import authenticate, login as django_login, logout as django_logout\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.db.utils import IntegrityError\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext_lazy as _\nfrom keymanager.settings import PAGE_SIZE\nfrom .forms import LoginForm\nfrom .forms import UserCreateForm, UserEditForm\nfrom utils.filters import require_superuser\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n\n def check(request, user_id):\n if request.user.is_superuser or user_id.encode('utf-8') == str(request\n .user.id):\n return func(request, user_id)\n return render(request, '403.html')\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = 'users/index.html'\n msg = ''\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _('Unable to list users.')\n LOG.error(msg)\n users = []\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n return render(request, template_name, {'users': users, 'message': msg})\n\n\n@require_superuser\ndef create(request):\n template_name = 'users/create_user.html'\n msg = ''\n user_form = UserCreateForm()\n if request.method == 'POST':\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(request.POST['username'\n ], request.POST['email'], request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.info(msg)\n except IntegrityError:\n msg = _('User already exist, please try another username.')\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % user_form.cleaned_data[\n 'username'].encode('utf-8')\n LOG.error(msg)\n return render(request, template_name, {'user_form': user_form,\n 'message': msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _('Unable to delete user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _('Unable to deactivate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _('Unable to activate user(%s)') % user_id\n LOG.error(msg)\n if user_id == request.user.id:\n logout(request)\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = 'users/update_user.html'\n msg = ''\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={'username': user.username, 'email':\n user.email})\n if request.method == 'POST':\n user_form = UserEditForm(request.POST)\n if user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = 
request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode('utf-8')\n LOG.info(msg)\n return render(request, template_name, {'user_id': user_id, 'user_form':\n user_form, 'message': msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = ''\n if request.user.is_authenticated():\n return redirect(reverse('keys:index'))\n form = LoginForm\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _('%s logged in successfully.') % username.encode(\n 'utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _('Invalid username or password.')\n LOG.error(msg)\n return render(request, template_name, {'user_form': form, 'message': msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse('index'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom django.contrib.auth import authenticate, login as django_login, logout as django_logout\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.db.utils import IntegrityError\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext_lazy as _\n\nfrom keymanager.settings import PAGE_SIZE\n\nfrom .forms import LoginForm\nfrom .forms import UserCreateForm, UserEditForm\nfrom utils.filters import require_superuser\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef require_superuser_or_self(func):\n def check(request, user_id):\n if request.user.is_superuser or \\\n user_id.encode(\"utf-8\") == str(request.user.id):\n return func(request, user_id)\n\n return render(request, \"403.html\")\n return check\n\n\n@require_superuser\ndef index(request):\n template_name = \"users/index.html\"\n msg = \"\"\n\n try:\n users = User.objects.exclude(id=request.user.id)\n except:\n msg = _(\"Unable to list users.\")\n LOG.error(msg)\n users = []\n\n paginator = Paginator(users, PAGE_SIZE)\n page = request.GET.get('page')\n try:\n users = paginator.page(page)\n except PageNotAnInteger:\n users = paginator.page(1)\n except EmptyPage:\n users = paginator.page(paginator.num_pages)\n\n return render(request, template_name, {\"users\": users, \"message\": msg})\n\n\n@require_superuser\ndef create(request):\n template_name = \"users/create_user.html\"\n msg = \"\"\n user_form = UserCreateForm()\n\n if request.method == \"POST\":\n user_form = UserCreateForm(request.POST)\n if user_form.is_valid():\n try:\n new_user = User.objects.create_user(\n request.POST['username'],\n request.POST['email'],\n request.POST['password'])\n new_user.save()\n msg = _('Success create user \"%s\"') % \\\n user_form.cleaned_data['username'].encode(\"utf-8\")\n LOG.info(msg)\n except IntegrityError:\n msg = _(\"User already exist, please try another username.\")\n LOG.error(msg)\n except:\n msg = _('Unable to create user \"%s\"') % \\\n user_form.cleaned_data['username'].encode(\"utf-8\")\n LOG.error(msg)\n\n return render(request, template_name, {\"user_form\": user_form,\n \"message\": msg})\n\n\n@require_superuser\ndef delete(request, user_id):\n try:\n User.objects.get(id=user_id).delete()\n except Exception:\n msg = _(\"Unable to delete user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef deactivate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = False\n user.save()\n except:\n msg = _(\"Unable to deactivate user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser\ndef activate(request, user_id):\n try:\n user = User.objects.get(id=user_id)\n user.is_active = True\n user.save()\n except:\n msg = _(\"Unable to activate user(%s)\") % user_id\n LOG.error(msg)\n\n if user_id == request.user.id:\n logout(request)\n\n return redirect(reverse('users:index'))\n\n\n@require_superuser_or_self\ndef edit(request, user_id):\n template_name = \"users/update_user.html\"\n msg = \"\"\n user = User.objects.get(id=user_id)\n user_form = UserEditForm(initial={\"username\": user.username,\n \"email\": user.email})\n\n if request.method == \"POST\":\n user_form = UserEditForm(request.POST)\n if 
user_form.is_valid():\n username = request.POST['username']\n email = request.POST['email']\n password = request.POST['password']\n if username:\n user.username = username\n if email:\n user.email = email\n if password:\n user.set_password(password)\n user.save()\n msg = _('Success updated user \"%s\"') % username.encode(\"utf-8\")\n LOG.info(msg)\n return render(request, template_name, {\"user_id\": user_id,\n \"user_form\": user_form,\n \"message\": msg})\n\n\ndef login(request):\n template_name = 'auth/login.html'\n msg = \"\"\n if request.user.is_authenticated():\n return redirect(reverse(\"keys:index\"))\n\n form = LoginForm\n\n if request.method == \"POST\":\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data[\"password\"]\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n django_login(request, user)\n msg = _(\"%s logged in successfully.\") % \\\n username.encode('utf-8')\n LOG.info(msg)\n return redirect(reverse('keys:index'))\n msg = _(\"Invalid username or password.\")\n LOG.error(msg)\n\n return render(request, template_name, {\"user_form\": form,\n \"message\": msg})\n\n\ndef logout(request):\n django_logout(request)\n return redirect(reverse(\"index\"))",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
# list audio files
import glob
def listFiles(path):
return glob.glob(path + '*.wav')
import random
def getNextFile(files):
return random.choice(files)
import pyaudio
import wave
CHUNK = 1024
def getRandomFile(folder = 'test/'):
files = listFiles(folder)
filename = getNextFile(files)
return filename
def play(filename):
    # open the input wave file
f = wave.open(filename, 'rb')
p = pyaudio.PyAudio()
#open stream
stream = p.open(format = p.get_format_from_width(f.getsampwidth()),
channels = f.getnchannels(),
rate = f.getframerate(),
output = True)
# read data
data = f.readframes(CHUNK)
#play stream
while data:
stream.write(data)
data = f.readframes(CHUNK)
#stop stream
stream.stop_stream()
stream.close()
#close PyAudio
p.terminate()
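# --------------------------------------------------------------------------
# Illustrative usage: exercises the helpers above end to end. It assumes the
# default 'test/' folder contains at least one .wav file, as getRandomFile()
# expects.
if __name__ == '__main__':
    files = listFiles('test/')
    if files:
        wav = getNextFile(files)
        print('Playing %s' % wav)
        play(wav)
    else:
        print('No .wav files found in test/')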
|
normal
|
{
"blob_id": "a3bcd383656284a2236e79b5d5d7acdfe433a13b",
"index": 8409,
"step-1": "<mask token>\n\n\ndef getNextFile(files):\n return random.choice(files)\n\n\n<mask token>\n\n\ndef getRandomFile(folder='test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\n\ndef play(filename):\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(), rate=f.getframerate(), output=True)\n data = f.readframes(CHUNK)\n while data:\n stream.write(data)\n data = f.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n",
"step-2": "<mask token>\n\n\ndef listFiles(path):\n return glob.glob(path + '*.wav')\n\n\n<mask token>\n\n\ndef getNextFile(files):\n return random.choice(files)\n\n\n<mask token>\n\n\ndef getRandomFile(folder='test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\n\ndef play(filename):\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(), rate=f.getframerate(), output=True)\n data = f.readframes(CHUNK)\n while data:\n stream.write(data)\n data = f.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n",
"step-3": "<mask token>\n\n\ndef listFiles(path):\n return glob.glob(path + '*.wav')\n\n\n<mask token>\n\n\ndef getNextFile(files):\n return random.choice(files)\n\n\n<mask token>\nCHUNK = 1024\n\n\ndef getRandomFile(folder='test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\n\ndef play(filename):\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(), rate=f.getframerate(), output=True)\n data = f.readframes(CHUNK)\n while data:\n stream.write(data)\n data = f.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n",
"step-4": "import glob\n\n\ndef listFiles(path):\n return glob.glob(path + '*.wav')\n\n\nimport random\n\n\ndef getNextFile(files):\n return random.choice(files)\n\n\nimport pyaudio\nimport wave\nCHUNK = 1024\n\n\ndef getRandomFile(folder='test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\n\ndef play(filename):\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(), rate=f.getframerate(), output=True)\n data = f.readframes(CHUNK)\n while data:\n stream.write(data)\n data = f.readframes(CHUNK)\n stream.stop_stream()\n stream.close()\n p.terminate()\n",
"step-5": "# list audio files\nimport glob\ndef listFiles(path):\n return glob.glob(path + '*.wav')\n\nimport random\ndef getNextFile(files):\n return random.choice(files)\n\nimport pyaudio\nimport wave\nCHUNK = 1024\n\ndef getRandomFile(folder = 'test/'):\n files = listFiles(folder)\n filename = getNextFile(files)\n return filename\n\ndef play(filename):\n # opem file\n f = wave.open(filename, 'rb')\n p = pyaudio.PyAudio()\n #open stream \n stream = p.open(format = p.get_format_from_width(f.getsampwidth()), \n channels = f.getnchannels(), \n rate = f.getframerate(), \n output = True)\n # read data\n data = f.readframes(CHUNK)\n #play stream \n while data:\n stream.write(data) \n data = f.readframes(CHUNK)\n #stop stream \n stream.stop_stream() \n stream.close() \n #close PyAudio \n p.terminate()\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Generated by Django 2.1.4 on 2019-04-23 23:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('machine', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AboutRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('modified', models.DateTimeField(auto_now=True, null=True)),
('address_of_delivery', models.CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), ('nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default='Meru Town', max_length=50)),
('approved', models.BooleanField(default=False)),
('active', models.BooleanField(default=True)),
('paid', models.BooleanField(default=False)),
],
options={
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('modified', models.DateTimeField(auto_now=True, null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=6, null=True)),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='details', to='request.AboutRequest')),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='machine.Machine')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-quantity',),
},
),
]
|
normal
|
{
"blob_id": "b9608208f71f25ae05ed9bd7bdf94b8882a26e06",
"index": 3091,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('machine', '0001_initial')]\n operations = [migrations.CreateModel(name='AboutRequest', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, null=True)), ('modified', models.DateTimeField(\n auto_now=True, null=True)), ('address_of_delivery', models.\n CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), (\n 'nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default=\n 'Meru Town', max_length=50)), ('approved', models.BooleanField(\n default=False)), ('active', models.BooleanField(default=True)), (\n 'paid', models.BooleanField(default=False))], options={'ordering':\n ('-created',)}), migrations.CreateModel(name='Request', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created', models.\n DateTimeField(auto_now_add=True, null=True)), ('modified', models.\n DateTimeField(auto_now=True, null=True)), ('price', models.\n DecimalField(decimal_places=2, max_digits=6, null=True)), (\n 'quantity', models.PositiveIntegerField(default=1)), ('order',\n models.ForeignKey(null=True, on_delete=django.db.models.deletion.\n CASCADE, related_name='details', to='request.AboutRequest')), (\n 'product', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='orders', to=settings.\n AUTH_USER_MODEL))], options={'ordering': ('-quantity',)})]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('machine', '0001_initial')]\n operations = [migrations.CreateModel(name='AboutRequest', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, null=True)), ('modified', models.DateTimeField(\n auto_now=True, null=True)), ('address_of_delivery', models.\n CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), (\n 'nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default=\n 'Meru Town', max_length=50)), ('approved', models.BooleanField(\n default=False)), ('active', models.BooleanField(default=True)), (\n 'paid', models.BooleanField(default=False))], options={'ordering':\n ('-created',)}), migrations.CreateModel(name='Request', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created', models.\n DateTimeField(auto_now_add=True, null=True)), ('modified', models.\n DateTimeField(auto_now=True, null=True)), ('price', models.\n DecimalField(decimal_places=2, max_digits=6, null=True)), (\n 'quantity', models.PositiveIntegerField(default=1)), ('order',\n models.ForeignKey(null=True, on_delete=django.db.models.deletion.\n CASCADE, related_name='details', to='request.AboutRequest')), (\n 'product', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='orders', to=settings.\n AUTH_USER_MODEL))], options={'ordering': ('-quantity',)})]\n",
"step-5": "# Generated by Django 2.1.4 on 2019-04-23 23:37\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('machine', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AboutRequest',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, null=True)),\n ('modified', models.DateTimeField(auto_now=True, null=True)),\n ('address_of_delivery', models.CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), ('nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default='Meru Town', max_length=50)),\n ('approved', models.BooleanField(default=False)),\n ('active', models.BooleanField(default=True)),\n ('paid', models.BooleanField(default=False)),\n ],\n options={\n 'ordering': ('-created',),\n },\n ),\n migrations.CreateModel(\n name='Request',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, null=True)),\n ('modified', models.DateTimeField(auto_now=True, null=True)),\n ('price', models.DecimalField(decimal_places=2, max_digits=6, null=True)),\n ('quantity', models.PositiveIntegerField(default=1)),\n ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='details', to='request.AboutRequest')),\n ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-quantity',),\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 11:56:41 2017
@author: cgao
"""
from beautifultable import BeautifulTable
#1. Old and new tax rate brackets
def tax_calculator(taxable_income, bracket, rate):
bracket2 = bracket[1:]
bracket2.append(float('Inf'))
bracket3 = [y-x for x,y in zip(bracket, bracket2)]
income_seg = [min(max(0, taxable_income - x), y) for x, y in zip(bracket, bracket3)]
return sum([x*y for x, y in zip(income_seg, rate)])
def old_bracket(taxable_income, joint = True):
rate= [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]
if not joint:
bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]
else:
bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]
return tax_calculator(taxable_income, bracket, rate)
def new_bracket(taxable_income, joint = True):
rate= [0.12, 0.25, 0.35, 0.396]
if not joint:
bracket = [0, 45000, 200000, 500000]
else:
bracket = [0, 90000, 260000, 1000000]
return tax_calculator(taxable_income, bracket, rate)
def AMT_bracket(taxable_income, joint = True):
rate= [0.26, 0.28]
if not joint:
bracket = [0, 93900]
else:
bracket = [0, 187800]
return tax_calculator(taxable_income, bracket, rate)
#2. Increase the Standard Deduction amount
'''
if joint:
old_standard_deduction = 12600
new_standard_deduction = 24000
else:
old_standard_deduction = 6300
new_standard_deduction = 12000
'''
#3. Reduce the mortgage interest deduction
def MTG_IR_deduction_old(UPB, rate):
return min(1000000.0, UPB)*rate
# existing_mtg = True: existing loan, grandfathered under the 1.0 million limit
def MTG_IR_deduction_new(UPB, rate, existing_mtg = False):
if existing_mtg:
return min(1000000.0, UPB)*rate
else:
return min(750000.0, UPB)*rate
#4. Reduce the state and local tax (property tax, etc.) deduction
def SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):
return taxable_income*efficient_state_rate + local_tax
def SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):
return min(10000.0, taxable_income*efficient_state_rate + local_tax)
#5. Eliminate the Personal Exemption
def PersonalExemption_deduction_old(taxable_income, member, joint = True):
if joint:
phaseout = min(0.02*round((max(taxable_income - 311300, 0)/2500 + 1e-7)), 1)
return int(4050*member*(1 - phaseout))
else:
phaseout = min(0.02*round(max(taxable_income - 259000, 0)/2500 + 1e-7), 1)
return int(4050*member*(1 - phaseout))
def PersonalExemption_deduction_new():
return 0
#6. Child Care Tax Credit
def ChildCare_Credit_old(taxable_income, child, joint = True):
if joint:
phaseout = round(max(taxable_income - 110000, 0)/20 + 1e-7)
return int(max(0,1000*child - phaseout))
else:
phaseout = round(max(taxable_income - 55000, 0)/20 + 1e-7)
return int(max(0,1000*child - phaseout))
def ChildCare_Credit_new(taxable_income, child, joint = True):
if joint:
phaseout = round(max(taxable_income - 230000, 0)/20 + 1e-7)
return int(max(0,1600*child - phaseout))
else:
phaseout = round(max(taxable_income - 115000, 0)/20 + 1e-7)
return int(max(0,1600*child - phaseout))
#7. Eliminate the AMT (Alternative Minimum Tax)
def AMT_exemption(taxable_income, joint = True):
if joint:
return max(0, 84500 - max(taxable_income - 160900, 0)/4)
else:
return max(0, 54300 - max(taxable_income - 120700, 0)/4)
#8. Phase out the Estate Tax
#9. Combined impact
def tax_comparison(taxable_income, member, child, UPB, rate, efficient_state_rate, local_tax, joint = True, existing_mtg = False, display = True, detail = False):
# Personal exemption (applied to both standard and itemized)
old_PersonalExemption_deduction = PersonalExemption_deduction_old(taxable_income, member, joint = joint)
# Child care tax credit (applied to both standard and itemized)
old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child, joint = joint)
new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child, joint = joint)
# Mortgage Interest Rate deduction (applied to itemized and AMT)
old_MTG_IR_deduction= MTG_IR_deduction_old(UPB, rate)
new_MTG_IR_deduction= MTG_IR_deduction_new(UPB, rate, existing_mtg = existing_mtg)
# State and local tax (applied to itemized only)
old_SALT_deduction = SALT_deduction_old(taxable_income, efficient_state_rate, local_tax)
new_SALT_deduction = SALT_deduction_new(taxable_income, efficient_state_rate, local_tax)
# calculate standard tax
if joint:
old_standard_deduction = 12600
new_standard_deduction = 24000
else:
old_standard_deduction = 6300
new_standard_deduction = 12000
# tax before Child care credit
old_tax_beforeCCTC_standard = old_bracket(taxable_income - old_standard_deduction - old_PersonalExemption_deduction, joint = joint)
new_tax_beforeCCTC_standard = new_bracket(taxable_income - new_standard_deduction, joint = joint)
    # tax after Child care credit
old_tax_standard = max(0, old_tax_beforeCCTC_standard - old_ChildCare_Credit)
new_tax_standard = max(0, new_tax_beforeCCTC_standard - new_ChildCare_Credit)
# calculate itemized tax
# tax before Child care credit
old_tax_beforeCCTC_itemized = old_bracket(taxable_income - old_MTG_IR_deduction - old_SALT_deduction - old_PersonalExemption_deduction, joint = joint)
new_tax_beforeCCTC_itemized = new_bracket(taxable_income - new_MTG_IR_deduction - new_SALT_deduction, joint = joint)
    # tax after Child care credit
old_tax_itemized = max(0, old_tax_beforeCCTC_itemized - old_ChildCare_Credit)
new_tax_itemized = max(0, new_tax_beforeCCTC_itemized - new_ChildCare_Credit)
# calculate AMT tax
AMT_exemption_amount = AMT_exemption(taxable_income, joint = joint)
# tax before Child care credit
old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income - AMT_exemption_amount - old_MTG_IR_deduction, joint = joint)
    # tax after Child care credit
old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)
tax_old = max(min(old_tax_standard, old_tax_itemized),old_tax_AMT)
tax_new = min(new_tax_standard, new_tax_itemized)
if display:
print("Current Tax Should Pay: $%3.2f"%tax_old)
print(" Standard: $%3.2f"%old_tax_standard)
print(" Itemized: $%3.2f"%old_tax_itemized)
print(" AMT tax: $%3.2f"%old_tax_AMT)
print("New Tax Should Pay: $%3.2f"%tax_new)
print(" Standard: $%3.2f"%new_tax_standard)
print(" Itemized: $%3.2f"%new_tax_itemized)
if detail:
print("***********************************************")
print("${:,} taxable income".format(taxable_income) + ', joint = %r'%joint)
print("%d Family Member, %d child(ren)"%(member, child))
print('Existing Mortgage: %r'%existing_mtg + ', ${:,} Mortgage Balance'.format(UPB) + ', %3.2f%% Interest Rate'%(rate*100),)
print('${:,} Local Tax'.format(local_tax) + ', %d%% State/City Tax Rate'%(efficient_state_rate*100),)
print("***********************************************")
table = BeautifulTable()
table.column_headers = ["Item", "Current", "New"]
table.append_row(["Standard Deduction", old_standard_deduction, new_standard_deduction])
table.append_row(["Personal Exemption", old_PersonalExemption_deduction, 'NA'])
table.append_row(["Child Care Tax Credit", old_ChildCare_Credit, new_ChildCare_Credit])
table.append_row(["Mortgage Interest Deduction", old_MTG_IR_deduction, new_MTG_IR_deduction])
table.append_row(["State and Local Tax Deduction", old_SALT_deduction, new_SALT_deduction])
table.append_row(["AMT Exemption (not including MTG Interest)", AMT_exemption_amount, "NA"])
table.append_row(["Tax", tax_old, tax_new])
print(table)
return [tax_old, tax_new, old_tax_standard, new_tax_standard, old_tax_itemized, new_tax_itemized, old_tax_AMT]
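# Hedged usage sketch (not in the original sample): the income, mortgage and
# tax figures below are made-up illustration values, not recommendations.
if __name__ == '__main__':
    tax_comparison(taxable_income=250000, member=4, child=2,
                   UPB=800000, rate=0.04,
                   efficient_state_rate=0.06, local_tax=12000,
                   joint=True, existing_mtg=False,
                   display=True, detail=True)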
|
normal
|
{
"blob_id": "70cb5673a13967247b6da1fa5948000db39a92c8",
"index": 7253,
"step-1": "<mask token>\n\n\ndef old_bracket(taxable_income, joint=True):\n rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate)\n\n\n<mask token>\n\n\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB) * rate\n\n\n<mask token>\n\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income * efficient_state_rate + local_tax)\n\n\n<mask token>\n\n\ndef PersonalExemption_deduction_new():\n return 0\n\n\n<mask token>\n\n\ndef ChildCare_Credit_new(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n\n\ndef AMT_exemption(taxable_income, joint=True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0) / 4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0) / 4)\n\n\ndef tax_comparison(taxable_income, member, child, UPB, rate,\n efficient_state_rate, local_tax, joint=True, existing_mtg=False,\n display=True, detail=False):\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(\n taxable_income, member, joint=joint)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,\n joint=joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,\n joint=joint)\n old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=\n existing_mtg)\n old_SALT_deduction = SALT_deduction_old(taxable_income,\n efficient_state_rate, local_tax)\n new_SALT_deduction = SALT_deduction_new(taxable_income,\n efficient_state_rate, local_tax)\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n old_tax_beforeCCTC_standard = old_bracket(taxable_income -\n old_standard_deduction - old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income -\n new_standard_deduction, joint=joint)\n old_tax_standard = max(0, old_tax_beforeCCTC_standard -\n old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard -\n new_ChildCare_Credit)\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income -\n old_MTG_IR_deduction - old_SALT_deduction -\n old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income -\n new_MTG_IR_deduction - new_SALT_deduction, joint=joint)\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -\n old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -\n new_ChildCare_Credit)\n AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -\n AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print('Current Tax Should Pay: $%3.2f' % tax_old)\n print(' Standard: $%3.2f' % old_tax_standard)\n print(' Itemized: $%3.2f' % old_tax_itemized)\n print(' AMT tax: $%3.2f' % old_tax_AMT)\n print('New Tax Should Pay: 
$%3.2f' % tax_new)\n print(' Standard: $%3.2f' % new_tax_standard)\n print(' Itemized: $%3.2f' % new_tax_itemized)\n if detail:\n print('***********************************************')\n print('${:,} taxable income'.format(taxable_income) + \n ', joint = %r' % joint)\n print('%d Family Member, %d child(ren)' % (member, child))\n print('Existing Mortgage: %r' % existing_mtg +\n ', ${:,} Mortgage Balance'.format(UPB) + \n ', %3.2f%% Interest Rate' % (rate * 100))\n print('${:,} Local Tax'.format(local_tax) + \n ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))\n print('***********************************************')\n table = BeautifulTable()\n table.column_headers = ['Item', 'Current', 'New']\n table.append_row(['Standard Deduction', old_standard_deduction,\n new_standard_deduction])\n table.append_row(['Personal Exemption',\n old_PersonalExemption_deduction, 'NA'])\n table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,\n new_ChildCare_Credit])\n table.append_row(['Mortgage Interest Deduction',\n old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row(['State and Local Tax Deduction',\n old_SALT_deduction, new_SALT_deduction])\n table.append_row(['AMT Exemption (not including MTG Interest)',\n AMT_exemption_amount, 'NA'])\n table.append_row(['Tax', tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard,\n old_tax_itemized, new_tax_itemized, old_tax_AMT]\n",
"step-2": "<mask token>\n\n\ndef old_bracket(taxable_income, joint=True):\n rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef new_bracket(taxable_income, joint=True):\n rate = [0.12, 0.25, 0.35, 0.396]\n if not joint:\n bracket = [0, 45000, 200000, 500000]\n else:\n bracket = [0, 90000, 260000, 1000000]\n return tax_calculator(taxable_income, bracket, rate)\n\n\n<mask token>\n\n\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB) * rate\n\n\ndef MTG_IR_deduction_new(UPB, rate, existing_mtg=False):\n if existing_mtg:\n return min(1000000.0, UPB) * rate\n else:\n return min(750000.0, UPB) * rate\n\n\n<mask token>\n\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income * efficient_state_rate + local_tax)\n\n\n<mask token>\n\n\ndef PersonalExemption_deduction_new():\n return 0\n\n\ndef ChildCare_Credit_old(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 110000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 55000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n\n\ndef ChildCare_Credit_new(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n\n\ndef AMT_exemption(taxable_income, joint=True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0) / 4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0) / 4)\n\n\ndef tax_comparison(taxable_income, member, child, UPB, rate,\n efficient_state_rate, local_tax, joint=True, existing_mtg=False,\n display=True, detail=False):\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(\n taxable_income, member, joint=joint)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,\n joint=joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,\n joint=joint)\n old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=\n existing_mtg)\n old_SALT_deduction = SALT_deduction_old(taxable_income,\n efficient_state_rate, local_tax)\n new_SALT_deduction = SALT_deduction_new(taxable_income,\n efficient_state_rate, local_tax)\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n old_tax_beforeCCTC_standard = old_bracket(taxable_income -\n old_standard_deduction - old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income -\n new_standard_deduction, joint=joint)\n old_tax_standard = max(0, old_tax_beforeCCTC_standard -\n old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard -\n new_ChildCare_Credit)\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income -\n old_MTG_IR_deduction - old_SALT_deduction -\n old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income -\n new_MTG_IR_deduction - new_SALT_deduction, joint=joint)\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -\n old_ChildCare_Credit)\n 
new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -\n new_ChildCare_Credit)\n AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -\n AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print('Current Tax Should Pay: $%3.2f' % tax_old)\n print(' Standard: $%3.2f' % old_tax_standard)\n print(' Itemized: $%3.2f' % old_tax_itemized)\n print(' AMT tax: $%3.2f' % old_tax_AMT)\n print('New Tax Should Pay: $%3.2f' % tax_new)\n print(' Standard: $%3.2f' % new_tax_standard)\n print(' Itemized: $%3.2f' % new_tax_itemized)\n if detail:\n print('***********************************************')\n print('${:,} taxable income'.format(taxable_income) + \n ', joint = %r' % joint)\n print('%d Family Member, %d child(ren)' % (member, child))\n print('Existing Mortgage: %r' % existing_mtg +\n ', ${:,} Mortgage Balance'.format(UPB) + \n ', %3.2f%% Interest Rate' % (rate * 100))\n print('${:,} Local Tax'.format(local_tax) + \n ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))\n print('***********************************************')\n table = BeautifulTable()\n table.column_headers = ['Item', 'Current', 'New']\n table.append_row(['Standard Deduction', old_standard_deduction,\n new_standard_deduction])\n table.append_row(['Personal Exemption',\n old_PersonalExemption_deduction, 'NA'])\n table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,\n new_ChildCare_Credit])\n table.append_row(['Mortgage Interest Deduction',\n old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row(['State and Local Tax Deduction',\n old_SALT_deduction, new_SALT_deduction])\n table.append_row(['AMT Exemption (not including MTG Interest)',\n AMT_exemption_amount, 'NA'])\n table.append_row(['Tax', tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard,\n old_tax_itemized, new_tax_itemized, old_tax_AMT]\n",
"step-3": "<mask token>\n\n\ndef old_bracket(taxable_income, joint=True):\n rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef new_bracket(taxable_income, joint=True):\n rate = [0.12, 0.25, 0.35, 0.396]\n if not joint:\n bracket = [0, 45000, 200000, 500000]\n else:\n bracket = [0, 90000, 260000, 1000000]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef AMT_bracket(taxable_income, joint=True):\n rate = [0.26, 0.28]\n if not joint:\n bracket = [0, 93900]\n else:\n bracket = [0, 187800]\n return tax_calculator(taxable_income, bracket, rate)\n\n\n<mask token>\n\n\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB) * rate\n\n\ndef MTG_IR_deduction_new(UPB, rate, existing_mtg=False):\n if existing_mtg:\n return min(1000000.0, UPB) * rate\n else:\n return min(750000.0, UPB) * rate\n\n\ndef SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):\n return taxable_income * efficient_state_rate + local_tax\n\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income * efficient_state_rate + local_tax)\n\n\ndef PersonalExemption_deduction_old(taxable_income, member, joint=True):\n if joint:\n phaseout = min(0.02 * round(max(taxable_income - 311300, 0) / 2500 +\n 1e-07), 1)\n return int(4050 * member * (1 - phaseout))\n else:\n phaseout = min(0.02 * round(max(taxable_income - 259000, 0) / 2500 +\n 1e-07), 1)\n return int(4050 * member * (1 - phaseout))\n\n\ndef PersonalExemption_deduction_new():\n return 0\n\n\ndef ChildCare_Credit_old(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 110000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 55000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n\n\ndef ChildCare_Credit_new(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n\n\ndef AMT_exemption(taxable_income, joint=True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0) / 4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0) / 4)\n\n\ndef tax_comparison(taxable_income, member, child, UPB, rate,\n efficient_state_rate, local_tax, joint=True, existing_mtg=False,\n display=True, detail=False):\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(\n taxable_income, member, joint=joint)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,\n joint=joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,\n joint=joint)\n old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=\n existing_mtg)\n old_SALT_deduction = SALT_deduction_old(taxable_income,\n efficient_state_rate, local_tax)\n new_SALT_deduction = SALT_deduction_new(taxable_income,\n efficient_state_rate, local_tax)\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n old_tax_beforeCCTC_standard = old_bracket(taxable_income -\n old_standard_deduction - 
old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income -\n new_standard_deduction, joint=joint)\n old_tax_standard = max(0, old_tax_beforeCCTC_standard -\n old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard -\n new_ChildCare_Credit)\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income -\n old_MTG_IR_deduction - old_SALT_deduction -\n old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income -\n new_MTG_IR_deduction - new_SALT_deduction, joint=joint)\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -\n old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -\n new_ChildCare_Credit)\n AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -\n AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print('Current Tax Should Pay: $%3.2f' % tax_old)\n print(' Standard: $%3.2f' % old_tax_standard)\n print(' Itemized: $%3.2f' % old_tax_itemized)\n print(' AMT tax: $%3.2f' % old_tax_AMT)\n print('New Tax Should Pay: $%3.2f' % tax_new)\n print(' Standard: $%3.2f' % new_tax_standard)\n print(' Itemized: $%3.2f' % new_tax_itemized)\n if detail:\n print('***********************************************')\n print('${:,} taxable income'.format(taxable_income) + \n ', joint = %r' % joint)\n print('%d Family Member, %d child(ren)' % (member, child))\n print('Existing Mortgage: %r' % existing_mtg +\n ', ${:,} Mortgage Balance'.format(UPB) + \n ', %3.2f%% Interest Rate' % (rate * 100))\n print('${:,} Local Tax'.format(local_tax) + \n ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))\n print('***********************************************')\n table = BeautifulTable()\n table.column_headers = ['Item', 'Current', 'New']\n table.append_row(['Standard Deduction', old_standard_deduction,\n new_standard_deduction])\n table.append_row(['Personal Exemption',\n old_PersonalExemption_deduction, 'NA'])\n table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,\n new_ChildCare_Credit])\n table.append_row(['Mortgage Interest Deduction',\n old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row(['State and Local Tax Deduction',\n old_SALT_deduction, new_SALT_deduction])\n table.append_row(['AMT Exemption (not including MTG Interest)',\n AMT_exemption_amount, 'NA'])\n table.append_row(['Tax', tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard,\n old_tax_itemized, new_tax_itemized, old_tax_AMT]\n",
"step-4": "<mask token>\nfrom beautifultable import BeautifulTable\n\n\ndef tax_calculator(taxable_income, bracket, rate):\n bracket2 = bracket[1:]\n bracket2.append(float('Inf'))\n bracket3 = [(y - x) for x, y in zip(bracket, bracket2)]\n income_seg = [min(max(0, taxable_income - x), y) for x, y in zip(\n bracket, bracket3)]\n return sum([(x * y) for x, y in zip(income_seg, rate)])\n\n\ndef old_bracket(taxable_income, joint=True):\n rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef new_bracket(taxable_income, joint=True):\n rate = [0.12, 0.25, 0.35, 0.396]\n if not joint:\n bracket = [0, 45000, 200000, 500000]\n else:\n bracket = [0, 90000, 260000, 1000000]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef AMT_bracket(taxable_income, joint=True):\n rate = [0.26, 0.28]\n if not joint:\n bracket = [0, 93900]\n else:\n bracket = [0, 187800]\n return tax_calculator(taxable_income, bracket, rate)\n\n\n<mask token>\n\n\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB) * rate\n\n\ndef MTG_IR_deduction_new(UPB, rate, existing_mtg=False):\n if existing_mtg:\n return min(1000000.0, UPB) * rate\n else:\n return min(750000.0, UPB) * rate\n\n\ndef SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):\n return taxable_income * efficient_state_rate + local_tax\n\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income * efficient_state_rate + local_tax)\n\n\ndef PersonalExemption_deduction_old(taxable_income, member, joint=True):\n if joint:\n phaseout = min(0.02 * round(max(taxable_income - 311300, 0) / 2500 +\n 1e-07), 1)\n return int(4050 * member * (1 - phaseout))\n else:\n phaseout = min(0.02 * round(max(taxable_income - 259000, 0) / 2500 +\n 1e-07), 1)\n return int(4050 * member * (1 - phaseout))\n\n\ndef PersonalExemption_deduction_new():\n return 0\n\n\ndef ChildCare_Credit_old(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 110000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 55000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n\n\ndef ChildCare_Credit_new(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n\n\ndef AMT_exemption(taxable_income, joint=True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0) / 4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0) / 4)\n\n\ndef tax_comparison(taxable_income, member, child, UPB, rate,\n efficient_state_rate, local_tax, joint=True, existing_mtg=False,\n display=True, detail=False):\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(\n taxable_income, member, joint=joint)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,\n joint=joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,\n joint=joint)\n old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=\n existing_mtg)\n old_SALT_deduction = SALT_deduction_old(taxable_income,\n 
efficient_state_rate, local_tax)\n new_SALT_deduction = SALT_deduction_new(taxable_income,\n efficient_state_rate, local_tax)\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n old_tax_beforeCCTC_standard = old_bracket(taxable_income -\n old_standard_deduction - old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income -\n new_standard_deduction, joint=joint)\n old_tax_standard = max(0, old_tax_beforeCCTC_standard -\n old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard -\n new_ChildCare_Credit)\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income -\n old_MTG_IR_deduction - old_SALT_deduction -\n old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income -\n new_MTG_IR_deduction - new_SALT_deduction, joint=joint)\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -\n old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -\n new_ChildCare_Credit)\n AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -\n AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print('Current Tax Should Pay: $%3.2f' % tax_old)\n print(' Standard: $%3.2f' % old_tax_standard)\n print(' Itemized: $%3.2f' % old_tax_itemized)\n print(' AMT tax: $%3.2f' % old_tax_AMT)\n print('New Tax Should Pay: $%3.2f' % tax_new)\n print(' Standard: $%3.2f' % new_tax_standard)\n print(' Itemized: $%3.2f' % new_tax_itemized)\n if detail:\n print('***********************************************')\n print('${:,} taxable income'.format(taxable_income) + \n ', joint = %r' % joint)\n print('%d Family Member, %d child(ren)' % (member, child))\n print('Existing Mortgage: %r' % existing_mtg +\n ', ${:,} Mortgage Balance'.format(UPB) + \n ', %3.2f%% Interest Rate' % (rate * 100))\n print('${:,} Local Tax'.format(local_tax) + \n ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))\n print('***********************************************')\n table = BeautifulTable()\n table.column_headers = ['Item', 'Current', 'New']\n table.append_row(['Standard Deduction', old_standard_deduction,\n new_standard_deduction])\n table.append_row(['Personal Exemption',\n old_PersonalExemption_deduction, 'NA'])\n table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,\n new_ChildCare_Credit])\n table.append_row(['Mortgage Interest Deduction',\n old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row(['State and Local Tax Deduction',\n old_SALT_deduction, new_SALT_deduction])\n table.append_row(['AMT Exemption (not including MTG Interest)',\n AMT_exemption_amount, 'NA'])\n table.append_row(['Tax', tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard,\n old_tax_itemized, new_tax_itemized, old_tax_AMT]\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 5 11:56:41 2017\n\n@author: cgao\n\"\"\"\n\nfrom beautifultable import BeautifulTable\n\n\n\n#1. 新旧税率Bracket\ndef tax_calculator(taxable_income, bracket, rate):\n bracket2 = bracket[1:]\n bracket2.append(float('Inf'))\n bracket3 = [y-x for x,y in zip(bracket, bracket2)]\n income_seg = [min(max(0, taxable_income - x), y) for x, y in zip(bracket, bracket3)]\n return sum([x*y for x, y in zip(income_seg, rate)])\n\ndef old_bracket(taxable_income, joint = True):\n rate= [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate) \n\ndef new_bracket(taxable_income, joint = True):\n rate= [0.12, 0.25, 0.35, 0.396]\n if not joint:\n bracket = [0, 45000, 200000, 500000]\n else:\n bracket = [0, 90000, 260000, 1000000]\n return tax_calculator(taxable_income, bracket, rate) \n\n\ndef AMT_bracket(taxable_income, joint = True):\n rate= [0.26, 0.28]\n if not joint:\n bracket = [0, 93900]\n else:\n bracket = [0, 187800]\n return tax_calculator(taxable_income, bracket, rate) \n\n#2. 增加标准扣除(Standard Deduction)额度\n'''\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n'''\n\n#3. 减少利息扣除\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB)*rate\n# existing_mtg = True: existing loan. Grand fathered 1.0 Million limit\ndef MTG_IR_deduction_new(UPB, rate, existing_mtg = False):\n if existing_mtg:\n return min(1000000.0, UPB)*rate\n else:\n return min(750000.0, UPB)*rate\n\n#4. 减少州与地方税收(房产税等)扣除\ndef SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):\n return taxable_income*efficient_state_rate + local_tax\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income*efficient_state_rate + local_tax)\n\n#5. 取消Personal Exemption\ndef PersonalExemption_deduction_old(taxable_income, member, joint = True):\n if joint:\n phaseout = min(0.02*round((max(taxable_income - 311300, 0)/2500 + 1e-7)), 1)\n return int(4050*member*(1 - phaseout))\n else:\n phaseout = min(0.02*round(max(taxable_income - 259000, 0)/2500 + 1e-7), 1)\n return int(4050*member*(1 - phaseout))\n \ndef PersonalExemption_deduction_new():\n return 0\n\n#6. Child Care Tax Credit\ndef ChildCare_Credit_old(taxable_income, child, joint = True):\n if joint:\n phaseout = round(max(taxable_income - 110000, 0)/20 + 1e-7)\n return int(max(0,1000*child - phaseout))\n else:\n phaseout = round(max(taxable_income - 55000, 0)/20 + 1e-7)\n return int(max(0,1000*child - phaseout))\n\n \ndef ChildCare_Credit_new(taxable_income, child, joint = True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0)/20 + 1e-7)\n return int(max(0,1600*child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0)/20 + 1e-7)\n return int(max(0,1600*child - phaseout))\n \n#7. 取消AMT (Alternative Minimum Tax)\ndef AMT_exemption(taxable_income, joint = True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0)/4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0)/4)\n \n#8. 逐步取消遗产税 (Estate Tax)\n\n#9. 
综合影响\ndef tax_comparison(taxable_income, member, child, UPB, rate, efficient_state_rate, local_tax, joint = True, existing_mtg = False, display = True, detail = False):\n# Personal exemption (applied to both standard and itemized)\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(taxable_income, member, joint = joint)\n# Child care tax credit (applied to both standard and itemized)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child, joint = joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child, joint = joint)\n# Mortgage Interest Rate deduction (applied to itemized and AMT)\n old_MTG_IR_deduction= MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction= MTG_IR_deduction_new(UPB, rate, existing_mtg = existing_mtg)\n# State and local tax (applied to itemized only)\n old_SALT_deduction = SALT_deduction_old(taxable_income, efficient_state_rate, local_tax) \n new_SALT_deduction = SALT_deduction_new(taxable_income, efficient_state_rate, local_tax)\n# calculate standard tax\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n # tax before Child care credit\n old_tax_beforeCCTC_standard = old_bracket(taxable_income - old_standard_deduction - old_PersonalExemption_deduction, joint = joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income - new_standard_deduction, joint = joint)\n # tax before Child after credit\n old_tax_standard = max(0, old_tax_beforeCCTC_standard - old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard - new_ChildCare_Credit)\n# calculate itemized tax \n # tax before Child care credit\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income - old_MTG_IR_deduction - old_SALT_deduction - old_PersonalExemption_deduction, joint = joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income - new_MTG_IR_deduction - new_SALT_deduction, joint = joint)\n # tax before Child after credit\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized - old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized - new_ChildCare_Credit)\n# calculate AMT tax \n AMT_exemption_amount = AMT_exemption(taxable_income, joint = joint)\n # tax before Child care credit\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income - AMT_exemption_amount - old_MTG_IR_deduction, joint = joint)\n # tax before Child after credit\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized),old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print(\"Current Tax Should Pay: $%3.2f\"%tax_old)\n print(\" Standard: $%3.2f\"%old_tax_standard)\n print(\" Itemized: $%3.2f\"%old_tax_itemized)\n print(\" AMT tax: $%3.2f\"%old_tax_AMT)\n print(\"New Tax Should Pay: $%3.2f\"%tax_new)\n print(\" Standard: $%3.2f\"%new_tax_standard)\n print(\" Itemized: $%3.2f\"%new_tax_itemized) \n if detail:\n print(\"***********************************************\")\n print(\"${:,} taxable income\".format(taxable_income) + ', joint = %r'%joint)\n print(\"%d Family Member, %d child(ren)\"%(member, child))\n print('Existing Mortgage: %r'%existing_mtg + ', ${:,} Mortgage Balance'.format(UPB) + ', %3.2f%% Interest Rate'%(rate*100),)\n print('${:,} Local Tax'.format(local_tax) + ', %d%% State/City Tax Rate'%(efficient_state_rate*100),)\n print(\"***********************************************\")\n table = BeautifulTable()\n table.column_headers = 
[\"Item\", \"Current\", \"New\"]\n table.append_row([\"Standard Deduction\", old_standard_deduction, new_standard_deduction])\n table.append_row([\"Personal Exemption\", old_PersonalExemption_deduction, 'NA'])\n table.append_row([\"Child Care Tax Credit\", old_ChildCare_Credit, new_ChildCare_Credit])\n table.append_row([\"Mortgage Interest Deduction\", old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row([\"State and Local Tax Deduction\", old_SALT_deduction, new_SALT_deduction])\n table.append_row([\"AMT Exemption (not including MTG Interest)\", AMT_exemption_amount, \"NA\"])\n table.append_row([\"Tax\", tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard, old_tax_itemized, new_tax_itemized, old_tax_AMT]\n\n",
"step-ids": [
7,
10,
13,
15,
16
]
}
|
[
7,
10,
13,
15,
16
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Vertex():
def __init__(self, key):
self.id = key
self.connections = {}
def add_neighbor(self, nbr, weight=0):
self.connections[nbr] = weight
def get_connections(self):
return self.connections.keys()
def get_id(self):
return self.id
def get_weight(self, nbr):
return self.connections[nbr]
def __str__(self):
connections = str([x.id for x in self.connections])
return f'{str(self.id)} connected to: {connections}'
class Graph():
def __init__(self):
self.vertices = {}
self.num_vertices = 0
def add_vertex(self, key):
new_vertex = Vertex(key)
self.num_vertices += 1
self.vertices[key] = new_vertex
return new_vertex
def get_vertex(self, key):
if key in self.vertices:
return self.vertices[key]
else:
return None
def add_edge(self, origin, dest, weight=0):
if origin not in self.vertices:
self.add_vertex(origin)
if dest not in self.vertices:
self.add_vertex(dest)
self.vertices[origin].add_neighbor(self.vertices[dest], weight)
def get_vertices(self):
return self.vertices.keys()
def __iter__(self):
return iter(self.vertices.values())
def __contains__(self, n):
return n in self.vertices
if __name__ == '__main__':
g = Graph()
for i in range(6):
g.add_vertex(i)
print(g.vertices)
g.add_edge(0, 1, 2)
for vertex in g:
print(vertex)
        print(vertex.get_connections())
print('---------------------')
|
normal
|
{
"blob_id": "3af78dcc0bb0b6f253af01d2945ad6ada02ca7a0",
"index": 7270,
"step-1": "class Vertex:\n <mask token>\n <mask token>\n\n def get_connections(self):\n return self.connections.keys()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Graph:\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\n<mask token>\n",
"step-2": "class Vertex:\n\n def __init__(self, key):\n self.id = key\n self.connections = {}\n\n def add_neighbor(self, nbr, weight=0):\n self.connections[nbr] = weight\n\n def get_connections(self):\n return self.connections.keys()\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Graph:\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\n<mask token>\n",
"step-3": "class Vertex:\n\n def __init__(self, key):\n self.id = key\n self.connections = {}\n\n def add_neighbor(self, nbr, weight=0):\n self.connections[nbr] = weight\n\n def get_connections(self):\n return self.connections.keys()\n <mask token>\n <mask token>\n\n def __str__(self):\n connections = str([x.id for x in self.connections])\n return f'{str(self.id)} connected to: {connections}'\n\n\nclass Graph:\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\n<mask token>\n",
"step-4": "class Vertex:\n\n def __init__(self, key):\n self.id = key\n self.connections = {}\n\n def add_neighbor(self, nbr, weight=0):\n self.connections[nbr] = weight\n\n def get_connections(self):\n return self.connections.keys()\n\n def get_id(self):\n return self.id\n\n def get_weight(self, nbr):\n return self.connections[nbr]\n\n def __str__(self):\n connections = str([x.id for x in self.connections])\n return f'{str(self.id)} connected to: {connections}'\n\n\nclass Graph:\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nclass Vertex():\n\n def __init__(self, key):\n self.id = key\n self.connections = {}\n\n def add_neighbor(self, nbr, weight=0):\n self.connections[nbr] = weight\n\n def get_connections(self):\n return self.connections.keys()\n\n def get_id(self):\n return self.id\n\n def get_weight(self, nbr):\n return self.connections[nbr]\n\n def __str__(self):\n connections = str([x.id for x in self.connections])\n return f'{str(self.id)} connected to: {connections}'\n\n\nclass Graph():\n\n def __init__(self):\n self.vertices = {}\n self.num_vertices = 0\n\n def add_vertex(self, key):\n new_vertex = Vertex(key)\n self.num_vertices += 1\n self.vertices[key] = new_vertex\n return new_vertex\n\n def get_vertex(self, key):\n if key in self.vertices:\n return self.vertices[key]\n else:\n return None\n\n def add_edge(self, origin, dest, weight=0):\n if origin not in self.vertices:\n self.add_vertex(origin)\n if dest not in self.vertices:\n self.add_vertex(dest)\n\n self.vertices[origin].add_neighbor(self.vertices[dest], weight)\n\n def get_vertices(self):\n return self.vertices.keys()\n\n def __iter__(self):\n return iter(self.vertices.values())\n\n def __contains__(self, n):\n return n in self.vertices\n\n\nif __name__ == '__main__':\n g = Graph()\n for i in range(6):\n g.add_vertex(i)\n print(g.vertices)\n g.add_edge(0, 1, 2)\n for vertex in g:\n print(vertex)\n print(vertex.get_connections)\n print('---------------------')\n",
"step-ids": [
10,
12,
13,
15,
17
]
}
|
[
10,
12,
13,
15,
17
] |
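The record above (steps 3–5) implements a weighted adjacency-list graph through the Vertex and Graph classes. A minimal sketch of how a traversal could be layered on top of that API, assuming Vertex and Graph from the record are in scope; the bfs_order helper below is illustrative and not part of the record:

from collections import deque

def bfs_order(graph, start_key):
    # Breadth-first walk over the Graph/Vertex API above; returns vertex ids in visit order
    start = graph.get_vertex(start_key)
    if start is None:
        return []
    visited = {start_key}
    order = []
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        order.append(vertex.get_id())
        for nbr in vertex.get_connections():  # neighbors are Vertex objects
            if nbr.get_id() not in visited:
                visited.add(nbr.get_id())
                queue.append(nbr)
    return order

g = Graph()
for origin, dest in [(0, 1), (0, 2), (1, 3), (2, 3)]:
    g.add_edge(origin, dest, weight=1)
print(bfs_order(g, 0))  # expected: [0, 1, 2, 3]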
config = {'numIndividuals': 50, 'maxNumGen': 20, 'eliteProp': 0.1,
'mutantProp': 0.2, 'inheritanceProb': 0.7}
|
normal
|
{
"blob_id": "85d1069d85e285bc5c36811f569dabd793b5064b",
"index": 4460,
"step-1": "<mask token>\n",
"step-2": "config = {'numIndividuals': 50, 'maxNumGen': 20, 'eliteProp': 0.1,\n 'mutantProp': 0.2, 'inheritanceProb': 0.7}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
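The configuration record above reads like the parameter set of a biased random-key genetic algorithm: population size, generation limit, elite and mutant proportions, and a crossover inheritance probability. A small sketch of how such a dictionary might translate into per-generation counts; the population_split helper is an assumption for illustration, not taken from the record:

config = {'numIndividuals': 50, 'maxNumGen': 20, 'eliteProp': 0.1,
          'mutantProp': 0.2, 'inheritanceProb': 0.7}

def population_split(cfg):
    # Split the population into elites, mutants and crossover offspring
    n = cfg['numIndividuals']
    elites = int(n * cfg['eliteProp'])      # 5 individuals copied unchanged
    mutants = int(n * cfg['mutantProp'])    # 10 individuals generated at random
    offspring = n - elites - mutants        # 35 individuals produced by crossover
    return elites, mutants, offspring

print(population_split(config))  # (5, 10, 35)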
'''
Paulie Jo Gonzalez
CS 4375 - os
Lab 0
Last modified: 02/14/2021
This code includes a reference to C code for my_getChar method provided by Dr. Freudenthal.
'''
from os import read
next_c = 0
limit = 0
def get_char():
global next_c, limit
if next_c == limit:
next_c = 0
        limit = read(0, 100)  # read up to 100 bytes from stdin (fd 0)
        if len(limit) == 0:  # nothing left to read (EOF)
            return ''
    if next_c >= len(limit) - 1:  # check upper bound of the buffer
        return ''
ch = chr(limit[next_c]) # convert to char (from ASCII)
next_c += 1
return ch
def my_read_line():
global next_c, limit
line = ''
ch = get_char()
# get each char of line
while (ch != '\n'): # while char is not new line
line += ch # build line
ch = get_char()
if ch == '':
return line # EOF
next_c = 0 # reset next_c and limit after line is read
limit = 0
line += '\n'
return line
# def my_read_lines():
# num_lines = 0
# in_line = my_read_line() # read line
# while len(in_line):
# num_lines += 1
# print(f'###line {num_lines}: <{str(in_line)}> ###\n')
# in_line = my_read_lines()
# print(f'eof after {num_lines}\n')
|
normal
|
{
"blob_id": "67ac5d82bc37b67cfdae73b6667b73b70ed33cfb",
"index": 8868,
"step-1": "<mask token>\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-3": "<mask token>\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-4": "<mask token>\nfrom os import read\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n if next_c == limit:\n next_c = 0\n limit = read(0, 100)\n if limit == 0:\n return ''\n if next_c >= len(limit) - 1:\n return ''\n ch = chr(limit[next_c])\n next_c += 1\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n line = ''\n ch = get_char()\n while ch != '\\n':\n line += ch\n ch = get_char()\n if ch == '':\n return line\n next_c = 0\n limit = 0\n line += '\\n'\n return line\n",
"step-5": "'''\nPaulie Jo Gonzalez\nCS 4375 - os\nLab 0\nLast modified: 02/14/2021\nThis code includes a reference to C code for my_getChar method provided by Dr. Freudenthal.\n'''\n\nfrom os import read\n\nnext_c = 0\nlimit = 0\n\n\ndef get_char():\n global next_c, limit\n\n if next_c == limit:\n next_c = 0\n limit = read(0, 100) # allocate bytes\n\n if limit == 0:\n return ''\n\n if next_c >= len(limit) - 1: # check upperbound\n return ''\n ch = chr(limit[next_c]) # convert to char (from ASCII)\n next_c += 1\n\n return ch\n\n\ndef my_read_line():\n global next_c, limit\n\n line = ''\n ch = get_char()\n\n # get each char of line\n while (ch != '\\n'): # while char is not new line\n line += ch # build line\n ch = get_char()\n if ch == '':\n return line # EOF\n\n next_c = 0 # reset next_c and limit after line is read\n limit = 0\n line += '\\n'\n\n return line\n\n\n# def my_read_lines():\n# num_lines = 0\n# in_line = my_read_line() # read line\n\n# while len(in_line):\n# num_lines += 1\n# print(f'###line {num_lines}: <{str(in_line)}> ###\\n')\n\n# in_line = my_read_lines()\n# print(f'eof after {num_lines}\\n')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
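The lab record above ends with my_read_line; its commented-out driver hints at how lines were meant to be consumed but mistakenly recurses into my_read_lines. A corrected sketch of that driver, assuming the intent matched the commented code:

def my_read_lines():
    num_lines = 0
    in_line = my_read_line()  # read the first line
    while len(in_line):
        num_lines += 1
        print(f'### line {num_lines}: <{in_line}> ###')
        in_line = my_read_line()  # read the next line (not a recursive call)
    print(f'eof after {num_lines} lines')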
from authtools.models import AbstractNamedUser
class User(AbstractNamedUser):
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
|
normal
|
{
"blob_id": "e7d7a002547047a9bcae830be96dd35db80a86e8",
"index": 7001,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(AbstractNamedUser):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass User(AbstractNamedUser):\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['name']\n",
"step-4": "from authtools.models import AbstractNamedUser\n\n\nclass User(AbstractNamedUser):\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['name']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
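The record above defines a custom user model on top of django-authtools' AbstractNamedUser. For Django to use it, the project settings normally have to point AUTH_USER_MODEL at it; a sketch with a hypothetical app label 'accounts':

# settings.py (sketch; the 'accounts' app label is an assumption)
INSTALLED_APPS = [
    # ...
    'authtools',
    'accounts',  # app that contains the User model above
]
AUTH_USER_MODEL = 'accounts.User'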
from binance.client import Client
from binance.websockets import BinanceSocketManager
from binance.exceptions import BinanceAPIException
from binance.enums import *
import time
import threading
import winsound
# Replace your_api_key and your_api_secret with your own Binance API key and secret
client = Client(your_api_key, your_api_secret)
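# As an example, the keys could be read from environment variables instead of being
# pasted into the source (the variable names below are an assumption):
#   import os
#   client = Client(os.environ['BINANCE_API_KEY'], os.environ['BINANCE_API_SECRET'])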
# Calculate list of symbols
def calculate_data_list():
counter=0
btc='BTC'
symbols=[]
all_positions=[]
positions_final=[]
volume=[]
c=[]
price_change = []
data=client.get_ticker()
for x in range(len(data)):
        if (btc in data[x]['symbol']) and data[x]['symbol'] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':
if float(data[x]['quoteVolume'])>100:
all_positions.append(x)
for x in all_positions:
c.append(float(data[x]['priceChangePercent']))
i = sorted(range(len(c)), key=lambda k: c[k])
i.reverse()
while (len(positions_final) < 20 and len(positions_final) < len(all_positions)):
symbols.append(data[all_positions[i[counter]]]['symbol'])
positions_final.append(all_positions[i[counter]])
volume.append(data[all_positions[i[counter]]]['quoteVolume'])
price_change.append(data[all_positions[i[counter]]]['priceChangePercent'])
counter += 1
return symbols, volume, positions_final, price_change
# Get candlestick data from Binance
def get_kline():
symbols, volume, pozitii,price_change = calculate_data_list()
prices = []
prices1 = []
k=[]
for x in symbols:
try:
order = client.get_klines( # Get 1 minute candlestick data from server
symbol=x,
interval='1m')
except BinanceAPIException as e:
print (e.status_code)
print (e.message)
try:
order1 = client.get_klines( # Get 15 minute candlestick data from server
symbol=x,
limit= 1000,
interval='15m')
except BinanceAPIException as e:
print (e.status_code)
print (e.message)
        if len(order1) < 970: # check if the coin has at least 10 days of data
a = symbols.index(x) # get index of x in symbols
k.append(a)
else:
prices.append([]) # add empty list to list of 1 minute
prices1.append([]) # add empty list to list of 15 minutes
for i in range(len(order)):
prices[-1].append(float(order[i][1])) # save 1 minute data
for i in range(len(order1)):
prices1[-1].append(float(order1[i][1])) # save 15 minute data
k.reverse()
for x in k:
symbols.pop(x)
volume.pop(x)
        pozitii.pop(x)  # keep the positions list aligned ('all_positions' is local to calculate_data_list)
price_change.pop(x)
return symbols, volume, pozitii, prices, prices1,price_change
# Calculate ratio between bid and ask order-book volumes
def process_depth(msg):
sums5=0
sumb5=0
m=-1
for x in range(5):
if float(msg['data']['bids'][x][1])>m:
m=float(msg['data']['bids'][x][1])
sums5 = sums5 + float(msg['data']['bids'][x][1])
sumb5 = sumb5 + float(msg['data']['asks'][x][1])
ratio1 = sums5 / sumb5
if (ratio1 < 1):
ratio1 = ((1 / ratio1) * -1) + 1
else:
ratio1 -= 1
sums20 = 0
sumb20 = 0
ratio2 = 0
try:
for x in range(17):
sums20 = sums20 + float(msg['data']['bids'][x][1])
sumb20 = sumb20 + float(msg['data']['asks'][x][1])
ratio2 = sums20 / sumb20
if (ratio2 < 1):
ratio2 = ((1 / ratio2) * -1) + 1
else:
ratio2 -= 1
except Exception as e:
print("")
for i in range(len(symbols)):
simbol = symbols[i].lower() + '@depth20'
if simbol == msg['stream']:
ratio5[i] = round(ratio1, 2)
ratio20[i] = round(ratio2, 2)
max_order5[i] = m
ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * 100 / float(volume[i]),2)
current_price[i] = float(msg['data']['bids'][0][0])
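# Worked example of the transform above: with bid volume 30 vs ask volume 10 the raw
# ratio is 3.0 and is stored as +2.0 (bid-heavy); with bid 10 vs ask 30 the raw ratio
# is ~0.33 and is stored as -2.0 (ask-heavy), so the sign encodes the dominant side.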
# Refresh 24h volume and price-change percentage from the ticker stream
def process_ticker(msg):
i=0
for x in symbols:
for y in range(len(msg)):
if x == str(msg[y]['s']):
volume[i] = int(float(msg[y]['q']))
price_change[i] = int(float(msg[y]['P']))
i+=1
symbols,volume,pozitii,k_line_1m,k_line_15m,price_change =get_kline()
# Declaring lists necessary for storing data
max_order5=[0 for x in range(len(symbols))]
current_price= [0 for x in range(len(symbols))]
price_chance_2_min = [0 for x in range(len(symbols))]
price_chance_5_min = [0 for x in range(len(symbols))]
price_chance_15_min = [0 for x in range(len(symbols))]
price_chance_30_min = [0 for x in range(len(symbols))]
price_change_25_30_min = [0 for x in range(len(symbols))]
price_chance_1_hour = [0 for x in range(len(symbols))]
price_chance_3_hour = [0 for x in range(len(symbols))]
price_chance_8_hour = [0 for x in range(len(symbols))]
price_change_1_days = [0 for x in range(len(symbols))]
price_change_3_days = [0 for x in range(len(symbols))]
price_change_5_days = [0 for x in range(len(symbols))]
price_change_7_days = [0 for x in range(len(symbols))]
price_change_10_days = [0 for x in range(len(symbols))]
average_10_min = [0 for x in range(len(symbols))]
average_20_min = [0 for x in range(len(symbols))]
average_50_min = [0 for x in range(len(symbols))]
average_100_min = [0 for x in range(len(symbols))]
average_change_10_min = [0 for x in range(len(symbols))]
average_change_20_min = [0 for x in range(len(symbols))]
average_change_50_min = [0 for x in range(len(symbols))]
average_change_100_min = [0 for x in range(len(symbols))]
total_score = [0 for x in range(len(symbols))]
ratio5=[0 for x in range(len(symbols))]
ratio5_10sec=[[] for y in range(len(symbols))]
ratio5_sum = [0 for x in range(len(symbols))]
ratio5_sum_10sec = [[] for y in range(len(symbols))]
ratio20= [0 for x in range(len(symbols))]
# Create list of stream names needed for the depth socket
list=[]
for x in symbols:
list.append(x.lower()+'@depth20') # append @depth20 to each symbol and add it into list
bm = BinanceSocketManager(client)
bm.start()
depth_socket = bm.start_multiplex_socket(list,process_depth) # start depth socket
ticker_socket = bm.start_ticker_socket(process_ticker) # start price socket
# Maintain the candlestick lists (rolling 1-minute and 15-minute buffers)
def kline_continuum():
i=0
while True:
time.sleep(60)
for x in range(len(symbols)):
k_line_1m[x].pop(0)
k_line_1m[x].append(current_price[x]) # add price to list of 1 minute candlestick every 1 minute
if i%15==0:
k_line_15m[x].pop(0)
k_line_15m[x].append(current_price[x]) # add price to list of 15 minute candlestick every 15 minute
i+=1
# Save bid/ask ratios for the last 10 seconds
def report_10_seconds():
while True:
for x in range(len(symbols)):
if len(ratio5_10sec[x])>10:
ratio5_10sec[x].pop(0)
if len(ratio5_sum_10sec[x]) > 10:
ratio5_sum_10sec[x].pop(0)
ratio5_10sec[x].append(ratio5[x])
ratio5_sum_10sec[x].append(ratio5_sum[x])
time.sleep(1)
# Calculate score for each symbol; you can add as many parameters as you want
def calculate_score():
for x in range(len(symbols)):
score = 0
# 2 minute change parameter score calculation
a = float(price_chance_2_min[x])
if a > 0 and a < 0.5:
score += 1
elif a >= 0.5 and a < 1:
score += 1.25
elif a >= 1 and a < 1.5:
score += 1.5
elif a >= 1.5 and a < 2:
score += 0.5
elif a >= 3:
score += 0.25
# 5 minute change parameter score calculation
a = float(price_chance_5_min[x])
if a > 0 and a < 0.5:
score += 1
elif a >= 0.5 and a < 1:
score += 1.25
elif a >= 1 and a < 2:
score += 1.5
elif a >= 2 and a < 3:
score += 0.5
elif a >= 3:
score += 0.25
# 15 minute change parameter score calculation
a = float(price_chance_15_min[x])
if a <= 1 and a > -0.5:
score += 0.25
elif a <= -0.5 and a > -1:
score += 0.5
elif a <= -1 and a > -1.5:
score += 0.75
elif a <= -1.5:
score += 1
# change between 25 and 30 minutes ago parameter score calculation
a = float(price_change_25_30_min[x])
if a <= 2 and a > -0.75:
score += 0.25
elif a <= -0.75 and a > -1.25:
score += 0.5
elif a <= -1.25 and a > -1.75:
score += 0.75
elif a <= -1.75:
score += 1
# 1 hour change parameter score calculation
a = float(price_chance_1_hour[x])
if a <= 2 and a >= 0:
score += 0.5
elif a <= 0 and a > -2:
score += 0.75
elif a <= -2:
score += 1
# 3 hour change parameter score calculation
a = float(price_chance_3_hour[x])
if a <= 5 and a > -1:
score += 0.25
elif a <= -1 and a > -3:
score += 0.5
elif a <= -3 and a > -6:
score += 0.75
elif a <= -6:
score += 1
# 8 hour change parameter score calculation
a = float(price_chance_8_hour[x])
if a <= 0 and a > -4:
score += 0.25
elif a <= -4 and a > -6:
score += 0.5
elif a <= -6:
score += 0.75
if float(ratio5[x]) > 0:
score += 1
a = 0
for i in range(len(ratio5_10sec[x])):
if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min[x]) > 1:
if float(ratio5_10sec[x][i]) > 0:
a += 1
if float(ratio5_sum_10sec[x][i]) > 0.3:
a += 1
score += a / len(ratio5_sum_10sec[x])
if float(ratio20[x]) > 0:
score += 1
a = 0
for i in range(len(ratio5_10sec[x])-1):
if float(ratio5_10sec[x][i]) > 0:
a += 1
if a <= 2:
score += 0.25
elif a > 2 and a <= 4:
score += 0.5
elif a > 4 and a <= 7:
score += 0.75
elif a > 7:
score += 1
a = 0
for i in range(20, 1, -1):
if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):
a += 1
score += a / 10
# 1 day change parameter score calculation
if float(price_change_1_days[x]) > 5:
score+=0.3
# 3 day change parameter score calculation
if float(price_change_3_days[x]) > 10:
score += 0.25
# 5 day change parameter score calculation
if float(price_change_5_days[x]) > 15:
score += 0.25
# 7 day change parameter score calculation
if float(price_change_7_days[x]) > 20:
score += 0.25
# 10 day change parameter score calculation
if float(price_change_10_days[x]) > -25:
score += 0.25
# 10 minutes moving average parameter score calculation
a=float(average_change_10_min[x])
if a<0.2 and a>-0.3:
score+=0.1
# 20 minutes moving average parameter score calculation
a = float(average_change_20_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# 50 minutes moving average parameter score calculation
a = float(average_change_50_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# 100 minutes moving average parameter score calculation
a = float(average_change_100_min[x])
if a < 0.2 and a > -0.3:
score += 0.1
# save score
total_score[x] = score
def print_results():
# sleep time before starting calculations
time.sleep(10)
while True:
for x in range(len(symbols)):
# calculate parameters percentages
try:
price_chance_2_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 2]) - 100, 2)
price_chance_5_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 5]) - 100, 2)
price_chance_15_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 15]) - 100, 2)
price_chance_30_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)
price_chance_1_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 60]) - 100, 2)
price_chance_3_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 180]) - 100, 2)
price_chance_8_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][20]) - 100, 2)
price_change_25_30_min[x] = round(float(k_line_1m[x][- 6]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)
price_change_1_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 96]) - 100, 1)
price_change_3_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 288]) - 100, 1)
price_change_5_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 480] )- 100, 1)
price_change_7_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 672]) - 100, 1)
price_change_10_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 960]) - 100, 1)
average_10_min[x] = round(float(sum(k_line_1m[x][- 10:])) / 10, 8)
average_20_min[x] = round(float(sum(k_line_1m[x][- 20:])) / 20, 8)
average_50_min[x] = round(float(sum(k_line_1m[x][- 50:])) / 50, 8)
average_100_min[x] = round(float(sum(k_line_1m[x][- 100:])) / 100, 8)
average_change_10_min[x] = round(float(current_price[x]) * 100 / float(average_10_min[x]) - 100, 2)
average_change_20_min[x] = round(float(current_price[x]) * 100 / float(average_20_min[x]) - 100, 2)
average_change_50_min[x] = round(float(current_price[x]) * 100 / float(average_50_min[x]) - 100, 2)
average_change_100_min[x] = round(float(current_price[x]) * 100 / float(average_100_min[x]) - 100, 2)
except Exception as e:
print(e)
# call function for score calculation
calculate_score()
# select parameter for which data is sorted
sort_by = total_score
# sort data
sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])
# sort data in reverse order
sorted_data.reverse()
        # print table header
print (time.ctime())
print ('%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (
'Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch', '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',
'25-30m', 'r5sum', '1d_ch', '3d_ch','5d_ch', '7d_ch', '10d_ch'))
# print top 10 cryptocurrencies data
for k in range(10):
i = sorted_data[k]
print ('%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (
symbols[i][:-3], total_score[i], ratio5[i], ratio20[i], price_chance_2_min[i], price_chance_5_min[i],
price_chance_15_min[i],price_chance_30_min[i], price_chance_1_hour[i], average_change_10_min[i],
average_change_20_min[i],average_change_50_min[i], average_change_100_min[i], price_chance_8_hour[i],
price_change_25_30_min[i], ratio5_sum[i], price_change_1_days[i], price_change_3_days[i],
price_change_5_days[i], price_change_7_days[i], price_change_10_days[i]))
        # if the top coin's score is > 10, play a sound
try:
if float(total_score[sorted_data[0]]) > 10:
winsound.PlaySound('\\Sound.wav', winsound.SND_FILENAME)
except Exception as e:
print(e)
# Seconds to wait before repeating while loop
time.sleep(1)
# Declaring threads
threads = [threading.Thread(target=kline_continuum),
threading.Thread(target=report_10_seconds),
threading.Thread(target=print_results)]
# Starting threads
[thread.start() for thread in threads]
[thread.join() for thread in threads]
|
normal
|
{
"blob_id": "dcc85b143f2394b7839f2fb9c2079a7dd9fa8e88",
"index": 4733,
"step-1": "<mask token>\n\n\ndef calculate_data_list():\n counter = 0\n btc = 'BTC'\n symbols = []\n all_positions = []\n positions_final = []\n volume = []\n c = []\n price_change = []\n data = client.get_ticker()\n for x in range(len(data)):\n if btc in data[x]['symbol'] and data[x]['symbol'\n ] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume']) > 100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while len(positions_final) < 20 and len(positions_final) < len(\n all_positions):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]][\n 'priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\ndef get_kline():\n symbols, volume, pozitii, price_change = calculate_data_list()\n prices = []\n prices1 = []\n k = []\n for x in symbols:\n try:\n order = client.get_klines(symbol=x, interval='1m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n try:\n order1 = client.get_klines(symbol=x, limit=1000, interval='15m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n if len(order1) < 970:\n a = symbols.index(x)\n k.append(a)\n else:\n prices.append([])\n prices1.append([])\n for i in range(len(order)):\n prices[-1].append(float(order[i][1]))\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1]))\n k.reverse()\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n return symbols, volume, pozitii, prices, prices1, price_change\n\n\ndef process_depth(msg):\n sums5 = 0\n sumb5 = 0\n m = -1\n for x in range(5):\n if float(msg['data']['bids'][x][1]) > m:\n m = float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if ratio1 < 1:\n ratio1 = 1 / ratio1 * -1 + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if ratio2 < 1:\n ratio2 = 1 / ratio2 * -1 + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print('')\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * \n 100 / float(volume[i]), 2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\n<mask token>\n\n\ndef kline_continuum():\n i = 0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x])\n if i % 15 == 0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x])\n i += 1\n\n\n<mask token>\n\n\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 
2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n score += 0.75\n if float(ratio5[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min\n [x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n if float(ratio20[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x]) - 1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n if float(price_change_1_days[x]) > 5:\n score += 0.3\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n a = float(average_change_10_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n total_score[x] = score\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculate_data_list():\n counter = 0\n btc = 'BTC'\n symbols = []\n all_positions = []\n positions_final = []\n volume = []\n c = []\n price_change = []\n data = client.get_ticker()\n for x in range(len(data)):\n if btc in data[x]['symbol'] and data[x]['symbol'\n ] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume']) > 100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while len(positions_final) < 20 and len(positions_final) < len(\n all_positions):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]][\n 'priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\ndef get_kline():\n symbols, volume, pozitii, price_change = calculate_data_list()\n prices = []\n prices1 = []\n k = []\n for x in symbols:\n try:\n order = client.get_klines(symbol=x, interval='1m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n try:\n order1 = client.get_klines(symbol=x, limit=1000, interval='15m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n if len(order1) < 970:\n a = symbols.index(x)\n k.append(a)\n else:\n prices.append([])\n prices1.append([])\n for i in range(len(order)):\n prices[-1].append(float(order[i][1]))\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1]))\n k.reverse()\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n return symbols, volume, pozitii, prices, prices1, price_change\n\n\ndef process_depth(msg):\n sums5 = 0\n sumb5 = 0\n m = -1\n for x in range(5):\n if float(msg['data']['bids'][x][1]) > m:\n m = float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if ratio1 < 1:\n ratio1 = 1 / ratio1 * -1 + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if ratio2 < 1:\n ratio2 = 1 / ratio2 * -1 + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print('')\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * \n 100 / float(volume[i]), 2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\ndef process_ticker(msg):\n i = 0\n for x in symbols:\n for y in range(len(msg)):\n if x == str(msg[y]['s']):\n volume[i] = int(float(msg[y]['q']))\n price_change[i] = int(float(msg[y]['P']))\n i += 1\n\n\n<mask token>\n\n\ndef kline_continuum():\n i = 0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x])\n if i % 15 == 0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x])\n i += 1\n\n\ndef report_10_seconds():\n while True:\n for x in range(len(symbols)):\n if len(ratio5_10sec[x]) > 10:\n ratio5_10sec[x].pop(0)\n if len(ratio5_sum_10sec[x]) > 10:\n ratio5_sum_10sec[x].pop(0)\n ratio5_10sec[x].append(ratio5[x])\n 
ratio5_sum_10sec[x].append(ratio5_sum[x])\n time.sleep(1)\n\n\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n score += 0.75\n if float(ratio5[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min\n [x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n if float(ratio20[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x]) - 1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n if float(price_change_1_days[x]) > 5:\n score += 0.3\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n a = float(average_change_10_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n total_score[x] = score\n\n\ndef print_results():\n time.sleep(10)\n while True:\n for x in range(len(symbols)):\n try:\n price_chance_2_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-2]) - 100, 2)\n price_chance_5_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-5]) - 100, 2)\n price_chance_15_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-15]) - 100, 2)\n price_chance_30_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_chance_1_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-60]) - 100, 2)\n price_chance_3_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-180]) - 100, 2)\n price_chance_8_hour[x] = 
round(float(current_price[x]) * \n 100 / float(k_line_1m[x][20]) - 100, 2)\n price_change_25_30_min[x] = round(float(k_line_1m[x][-6]) *\n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_change_1_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-96]) - 100, 1)\n price_change_3_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-288]) - 100, 1)\n price_change_5_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-480]) - 100, 1)\n price_change_7_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-672]) - 100, 1)\n price_change_10_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-960]) - 100, 1)\n average_10_min[x] = round(float(sum(k_line_1m[x][-10:])) / \n 10, 8)\n average_20_min[x] = round(float(sum(k_line_1m[x][-20:])) / \n 20, 8)\n average_50_min[x] = round(float(sum(k_line_1m[x][-50:])) / \n 50, 8)\n average_100_min[x] = round(float(sum(k_line_1m[x][-100:])) /\n 100, 8)\n average_change_10_min[x] = round(float(current_price[x]) * \n 100 / float(average_10_min[x]) - 100, 2)\n average_change_20_min[x] = round(float(current_price[x]) * \n 100 / float(average_20_min[x]) - 100, 2)\n average_change_50_min[x] = round(float(current_price[x]) * \n 100 / float(average_50_min[x]) - 100, 2)\n average_change_100_min[x] = round(float(current_price[x]) *\n 100 / float(average_100_min[x]) - 100, 2)\n except Exception as e:\n print(e)\n calculate_score()\n sort_by = total_score\n sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])\n sorted_data.reverse()\n print(time.ctime())\n print(\n '%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % ('Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch',\n '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',\n '25-30m', 'r5sum', '1d_ch', '3d_ch', '5d_ch', '7d_ch', '10d_ch'))\n for k in range(10):\n i = sorted_data[k]\n print(\n '%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % (symbols[i][:-3], total_score[i], ratio5[i], ratio20[i],\n price_chance_2_min[i], price_chance_5_min[i],\n price_chance_15_min[i], price_chance_30_min[i],\n price_chance_1_hour[i], average_change_10_min[i],\n average_change_20_min[i], average_change_50_min[i],\n average_change_100_min[i], price_chance_8_hour[i],\n price_change_25_30_min[i], ratio5_sum[i],\n price_change_1_days[i], price_change_3_days[i],\n price_change_5_days[i], price_change_7_days[i],\n price_change_10_days[i]))\n try:\n if float(total_score[sorted_data[0]]) > 10:\n winsound.PlaySound('\\\\Sound.wav', winsound.SND_FILENAME)\n except Exception as e:\n print(e)\n time.sleep(1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calculate_data_list():\n counter = 0\n btc = 'BTC'\n symbols = []\n all_positions = []\n positions_final = []\n volume = []\n c = []\n price_change = []\n data = client.get_ticker()\n for x in range(len(data)):\n if btc in data[x]['symbol'] and data[x]['symbol'\n ] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume']) > 100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while len(positions_final) < 20 and len(positions_final) < len(\n all_positions):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]][\n 'priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\ndef get_kline():\n symbols, volume, pozitii, price_change = calculate_data_list()\n prices = []\n prices1 = []\n k = []\n for x in symbols:\n try:\n order = client.get_klines(symbol=x, interval='1m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n try:\n order1 = client.get_klines(symbol=x, limit=1000, interval='15m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n if len(order1) < 970:\n a = symbols.index(x)\n k.append(a)\n else:\n prices.append([])\n prices1.append([])\n for i in range(len(order)):\n prices[-1].append(float(order[i][1]))\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1]))\n k.reverse()\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n return symbols, volume, pozitii, prices, prices1, price_change\n\n\ndef process_depth(msg):\n sums5 = 0\n sumb5 = 0\n m = -1\n for x in range(5):\n if float(msg['data']['bids'][x][1]) > m:\n m = float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if ratio1 < 1:\n ratio1 = 1 / ratio1 * -1 + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if ratio2 < 1:\n ratio2 = 1 / ratio2 * -1 + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print('')\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * \n 100 / float(volume[i]), 2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\ndef process_ticker(msg):\n i = 0\n for x in symbols:\n for y in range(len(msg)):\n if x == str(msg[y]['s']):\n volume[i] = int(float(msg[y]['q']))\n price_change[i] = int(float(msg[y]['P']))\n i += 1\n\n\n<mask token>\nfor x in symbols:\n list.append(x.lower() + '@depth20')\n<mask token>\nbm.start()\n<mask token>\n\n\ndef kline_continuum():\n i = 0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x])\n if i % 15 == 0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x])\n i += 1\n\n\ndef report_10_seconds():\n while True:\n for x in range(len(symbols)):\n if len(ratio5_10sec[x]) > 10:\n ratio5_10sec[x].pop(0)\n if 
len(ratio5_sum_10sec[x]) > 10:\n ratio5_sum_10sec[x].pop(0)\n ratio5_10sec[x].append(ratio5[x])\n ratio5_sum_10sec[x].append(ratio5_sum[x])\n time.sleep(1)\n\n\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n score += 0.75\n if float(ratio5[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min\n [x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n if float(ratio20[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x]) - 1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n if float(price_change_1_days[x]) > 5:\n score += 0.3\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n a = float(average_change_10_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n total_score[x] = score\n\n\ndef print_results():\n time.sleep(10)\n while True:\n for x in range(len(symbols)):\n try:\n price_chance_2_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-2]) - 100, 2)\n price_chance_5_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-5]) - 100, 2)\n price_chance_15_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-15]) - 100, 2)\n price_chance_30_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_chance_1_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-60]) - 100, 2)\n price_chance_3_hour[x] = 
round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-180]) - 100, 2)\n price_chance_8_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][20]) - 100, 2)\n price_change_25_30_min[x] = round(float(k_line_1m[x][-6]) *\n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_change_1_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-96]) - 100, 1)\n price_change_3_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-288]) - 100, 1)\n price_change_5_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-480]) - 100, 1)\n price_change_7_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-672]) - 100, 1)\n price_change_10_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-960]) - 100, 1)\n average_10_min[x] = round(float(sum(k_line_1m[x][-10:])) / \n 10, 8)\n average_20_min[x] = round(float(sum(k_line_1m[x][-20:])) / \n 20, 8)\n average_50_min[x] = round(float(sum(k_line_1m[x][-50:])) / \n 50, 8)\n average_100_min[x] = round(float(sum(k_line_1m[x][-100:])) /\n 100, 8)\n average_change_10_min[x] = round(float(current_price[x]) * \n 100 / float(average_10_min[x]) - 100, 2)\n average_change_20_min[x] = round(float(current_price[x]) * \n 100 / float(average_20_min[x]) - 100, 2)\n average_change_50_min[x] = round(float(current_price[x]) * \n 100 / float(average_50_min[x]) - 100, 2)\n average_change_100_min[x] = round(float(current_price[x]) *\n 100 / float(average_100_min[x]) - 100, 2)\n except Exception as e:\n print(e)\n calculate_score()\n sort_by = total_score\n sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])\n sorted_data.reverse()\n print(time.ctime())\n print(\n '%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % ('Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch',\n '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',\n '25-30m', 'r5sum', '1d_ch', '3d_ch', '5d_ch', '7d_ch', '10d_ch'))\n for k in range(10):\n i = sorted_data[k]\n print(\n '%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % (symbols[i][:-3], total_score[i], ratio5[i], ratio20[i],\n price_chance_2_min[i], price_chance_5_min[i],\n price_chance_15_min[i], price_chance_30_min[i],\n price_chance_1_hour[i], average_change_10_min[i],\n average_change_20_min[i], average_change_50_min[i],\n average_change_100_min[i], price_chance_8_hour[i],\n price_change_25_30_min[i], ratio5_sum[i],\n price_change_1_days[i], price_change_3_days[i],\n price_change_5_days[i], price_change_7_days[i],\n price_change_10_days[i]))\n try:\n if float(total_score[sorted_data[0]]) > 10:\n winsound.PlaySound('\\\\Sound.wav', winsound.SND_FILENAME)\n except Exception as e:\n print(e)\n time.sleep(1)\n\n\n<mask token>\n[thread.start() for thread in threads]\n[thread.join() for thread in threads]\n",
"step-4": "from binance.client import Client\nfrom binance.websockets import BinanceSocketManager\nfrom binance.enums import *\nimport time\nimport threading\nimport winsound\nclient = Client(your_api_key, your_api_secret)\n\n\ndef calculate_data_list():\n counter = 0\n btc = 'BTC'\n symbols = []\n all_positions = []\n positions_final = []\n volume = []\n c = []\n price_change = []\n data = client.get_ticker()\n for x in range(len(data)):\n if btc in data[x]['symbol'] and data[x]['symbol'\n ] != 'BTCUSDT' and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume']) > 100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while len(positions_final) < 20 and len(positions_final) < len(\n all_positions):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]][\n 'priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\ndef get_kline():\n symbols, volume, pozitii, price_change = calculate_data_list()\n prices = []\n prices1 = []\n k = []\n for x in symbols:\n try:\n order = client.get_klines(symbol=x, interval='1m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n try:\n order1 = client.get_klines(symbol=x, limit=1000, interval='15m')\n except BinanceAPIException as e:\n print(e.status_code)\n print(e.message)\n if len(order1) < 970:\n a = symbols.index(x)\n k.append(a)\n else:\n prices.append([])\n prices1.append([])\n for i in range(len(order)):\n prices[-1].append(float(order[i][1]))\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1]))\n k.reverse()\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n return symbols, volume, pozitii, prices, prices1, price_change\n\n\ndef process_depth(msg):\n sums5 = 0\n sumb5 = 0\n m = -1\n for x in range(5):\n if float(msg['data']['bids'][x][1]) > m:\n m = float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if ratio1 < 1:\n ratio1 = 1 / ratio1 * -1 + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if ratio2 < 1:\n ratio2 = 1 / ratio2 * -1 + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print('')\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * \n 100 / float(volume[i]), 2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\ndef process_ticker(msg):\n i = 0\n for x in symbols:\n for y in range(len(msg)):\n if x == str(msg[y]['s']):\n volume[i] = int(float(msg[y]['q']))\n price_change[i] = int(float(msg[y]['P']))\n i += 1\n\n\nsymbols, volume, pozitii, k_line_1m, k_line_15m, price_change = get_kline()\nmax_order5 = [(0) for x in range(len(symbols))]\ncurrent_price = [(0) for x in range(len(symbols))]\nprice_chance_2_min = [(0) for x in range(len(symbols))]\nprice_chance_5_min = [(0) for x in range(len(symbols))]\nprice_chance_15_min = [(0) 
for x in range(len(symbols))]\nprice_chance_30_min = [(0) for x in range(len(symbols))]\nprice_change_25_30_min = [(0) for x in range(len(symbols))]\nprice_chance_1_hour = [(0) for x in range(len(symbols))]\nprice_chance_3_hour = [(0) for x in range(len(symbols))]\nprice_chance_8_hour = [(0) for x in range(len(symbols))]\nprice_change_1_days = [(0) for x in range(len(symbols))]\nprice_change_3_days = [(0) for x in range(len(symbols))]\nprice_change_5_days = [(0) for x in range(len(symbols))]\nprice_change_7_days = [(0) for x in range(len(symbols))]\nprice_change_10_days = [(0) for x in range(len(symbols))]\naverage_10_min = [(0) for x in range(len(symbols))]\naverage_20_min = [(0) for x in range(len(symbols))]\naverage_50_min = [(0) for x in range(len(symbols))]\naverage_100_min = [(0) for x in range(len(symbols))]\naverage_change_10_min = [(0) for x in range(len(symbols))]\naverage_change_20_min = [(0) for x in range(len(symbols))]\naverage_change_50_min = [(0) for x in range(len(symbols))]\naverage_change_100_min = [(0) for x in range(len(symbols))]\ntotal_score = [(0) for x in range(len(symbols))]\nratio5 = [(0) for x in range(len(symbols))]\nratio5_10sec = [[] for y in range(len(symbols))]\nratio5_sum = [(0) for x in range(len(symbols))]\nratio5_sum_10sec = [[] for y in range(len(symbols))]\nratio20 = [(0) for x in range(len(symbols))]\nlist = []\nfor x in symbols:\n list.append(x.lower() + '@depth20')\nbm = BinanceSocketManager(client)\nbm.start()\ndepth_socket = bm.start_multiplex_socket(list, process_depth)\nticker_socket = bm.start_ticker_socket(process_ticker)\n\n\ndef kline_continuum():\n i = 0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x])\n if i % 15 == 0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x])\n i += 1\n\n\ndef report_10_seconds():\n while True:\n for x in range(len(symbols)):\n if len(ratio5_10sec[x]) > 10:\n ratio5_10sec[x].pop(0)\n if len(ratio5_sum_10sec[x]) > 10:\n ratio5_sum_10sec[x].pop(0)\n ratio5_10sec[x].append(ratio5[x])\n ratio5_sum_10sec[x].append(ratio5_sum[x])\n time.sleep(1)\n\n\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n 
score += 0.75\n if float(ratio5[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min\n [x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n if float(ratio20[x]) > 0:\n score += 1\n a = 0\n for i in range(len(ratio5_10sec[x]) - 1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n if float(price_change_1_days[x]) > 5:\n score += 0.3\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n a = float(average_change_10_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n total_score[x] = score\n\n\ndef print_results():\n time.sleep(10)\n while True:\n for x in range(len(symbols)):\n try:\n price_chance_2_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-2]) - 100, 2)\n price_chance_5_min[x] = round(float(current_price[x]) * 100 /\n float(k_line_1m[x][-5]) - 100, 2)\n price_chance_15_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-15]) - 100, 2)\n price_chance_30_min[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_chance_1_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-60]) - 100, 2)\n price_chance_3_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][-180]) - 100, 2)\n price_chance_8_hour[x] = round(float(current_price[x]) * \n 100 / float(k_line_1m[x][20]) - 100, 2)\n price_change_25_30_min[x] = round(float(k_line_1m[x][-6]) *\n 100 / float(k_line_1m[x][-30]) - 100, 2)\n price_change_1_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-96]) - 100, 1)\n price_change_3_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-288]) - 100, 1)\n price_change_5_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-480]) - 100, 1)\n price_change_7_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-672]) - 100, 1)\n price_change_10_days[x] = round(float(current_price[x]) * \n 100 / float(k_line_15m[x][-960]) - 100, 1)\n average_10_min[x] = round(float(sum(k_line_1m[x][-10:])) / \n 10, 8)\n average_20_min[x] = round(float(sum(k_line_1m[x][-20:])) / \n 20, 8)\n average_50_min[x] = round(float(sum(k_line_1m[x][-50:])) / \n 50, 8)\n average_100_min[x] = round(float(sum(k_line_1m[x][-100:])) /\n 100, 8)\n average_change_10_min[x] = round(float(current_price[x]) * \n 100 / float(average_10_min[x]) - 100, 2)\n average_change_20_min[x] = round(float(current_price[x]) * \n 100 / float(average_20_min[x]) - 100, 2)\n average_change_50_min[x] = round(float(current_price[x]) * \n 100 / float(average_50_min[x]) - 100, 2)\n average_change_100_min[x] = round(float(current_price[x]) *\n 100 / float(average_100_min[x]) - 100, 2)\n except Exception as e:\n print(e)\n 
calculate_score()\n sort_by = total_score\n sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])\n sorted_data.reverse()\n print(time.ctime())\n print(\n '%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % ('Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch',\n '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',\n '25-30m', 'r5sum', '1d_ch', '3d_ch', '5d_ch', '7d_ch', '10d_ch'))\n for k in range(10):\n i = sorted_data[k]\n print(\n '%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s'\n % (symbols[i][:-3], total_score[i], ratio5[i], ratio20[i],\n price_chance_2_min[i], price_chance_5_min[i],\n price_chance_15_min[i], price_chance_30_min[i],\n price_chance_1_hour[i], average_change_10_min[i],\n average_change_20_min[i], average_change_50_min[i],\n average_change_100_min[i], price_chance_8_hour[i],\n price_change_25_30_min[i], ratio5_sum[i],\n price_change_1_days[i], price_change_3_days[i],\n price_change_5_days[i], price_change_7_days[i],\n price_change_10_days[i]))\n try:\n if float(total_score[sorted_data[0]]) > 10:\n winsound.PlaySound('\\\\Sound.wav', winsound.SND_FILENAME)\n except Exception as e:\n print(e)\n time.sleep(1)\n\n\nthreads = [threading.Thread(target=kline_continuum), threading.Thread(\n target=report_10_seconds), threading.Thread(target=print_results)]\n[thread.start() for thread in threads]\n[thread.join() for thread in threads]\n",
"step-5": "from binance.client import Client\nfrom binance.websockets import BinanceSocketManager\nfrom binance.enums import *\nimport time\nimport threading\nimport winsound\n\n# Replace your_api_key, your_api_secret with your api_key, api_secret\nclient = Client(your_api_key, your_api_secret)\n\n\n# Calculate list of symbols\ndef calculate_data_list():\n counter=0\n btc='BTC'\n symbols=[]\n all_positions=[]\n positions_final=[]\n volume=[]\n c=[]\n price_change = []\n data=client.get_ticker()\n for x in range(len(data)):\n if (btc in data[x]['symbol']) and data[x]['symbol'] != 'BTCUSDT'and data[x]['symbol'] != 'VENBTC':\n if float(data[x]['quoteVolume'])>100:\n all_positions.append(x)\n for x in all_positions:\n c.append(float(data[x]['priceChangePercent']))\n i = sorted(range(len(c)), key=lambda k: c[k])\n i.reverse()\n while (len(positions_final) < 20 and len(positions_final) < len(all_positions)):\n symbols.append(data[all_positions[i[counter]]]['symbol'])\n positions_final.append(all_positions[i[counter]])\n volume.append(data[all_positions[i[counter]]]['quoteVolume'])\n price_change.append(data[all_positions[i[counter]]]['priceChangePercent'])\n counter += 1\n return symbols, volume, positions_final, price_change\n\n\n# Get candlestick data from Binance\ndef get_kline():\n symbols, volume, pozitii,price_change = calculate_data_list()\n prices = []\n prices1 = []\n k=[]\n\n for x in symbols:\n try:\n order = client.get_klines( # Get 1 minute candlestick data from server\n symbol=x,\n interval='1m')\n except BinanceAPIException as e:\n print (e.status_code)\n print (e.message)\n try:\n order1 = client.get_klines( # Get 15 minute candlestick data from server\n symbol=x,\n limit= 1000,\n interval='15m')\n except BinanceAPIException as e:\n print (e.status_code)\n print (e.message)\n\n if len(order1) < 970: # check if coin have at least 10 days of data\n a = symbols.index(x) # get index of x in symbols\n k.append(a)\n else:\n prices.append([]) # add empty list to list of 1 minute\n prices1.append([]) # add empty list to list of 15 minutes\n for i in range(len(order)):\n prices[-1].append(float(order[i][1])) # save 1 minute data\n for i in range(len(order1)):\n prices1[-1].append(float(order1[i][1])) # save 15 minute data\n k.reverse()\n\n for x in k:\n symbols.pop(x)\n volume.pop(x)\n all_positions.pop(x)\n price_change.pop(x)\n\n return symbols, volume, pozitii, prices, prices1,price_change\n# Calculate report between bid and ask offers\ndef process_depth(msg):\n sums5=0\n sumb5=0\n m=-1\n for x in range(5):\n if float(msg['data']['bids'][x][1])>m:\n m=float(msg['data']['bids'][x][1])\n sums5 = sums5 + float(msg['data']['bids'][x][1])\n sumb5 = sumb5 + float(msg['data']['asks'][x][1])\n ratio1 = sums5 / sumb5\n if (ratio1 < 1):\n ratio1 = ((1 / ratio1) * -1) + 1\n else:\n ratio1 -= 1\n sums20 = 0\n sumb20 = 0\n ratio2 = 0\n try:\n for x in range(17):\n sums20 = sums20 + float(msg['data']['bids'][x][1])\n sumb20 = sumb20 + float(msg['data']['asks'][x][1])\n ratio2 = sums20 / sumb20\n if (ratio2 < 1):\n ratio2 = ((1 / ratio2) * -1) + 1\n else:\n ratio2 -= 1\n except Exception as e:\n print(\"\")\n\n for i in range(len(symbols)):\n simbol = symbols[i].lower() + '@depth20'\n if simbol == msg['stream']:\n ratio5[i] = round(ratio1, 2)\n ratio20[i] = round(ratio2, 2)\n max_order5[i] = m\n ratio5_sum[i] = round(float(sums5) * float(current_price[i]) * 100 / float(volume[i]),2)\n current_price[i] = float(msg['data']['bids'][0][0])\n\n\n# Refresh price and volume to current price and volume\ndef 
process_ticker(msg):\n i=0\n for x in symbols:\n for y in range(len(msg)):\n if x == str(msg[y]['s']):\n volume[i] = int(float(msg[y]['q']))\n price_change[i] = int(float(msg[y]['P']))\n i+=1\n\nsymbols,volume,pozitii,k_line_1m,k_line_15m,price_change =get_kline()\n\n\n# Declaring lists necessary for storing data\nmax_order5=[0 for x in range(len(symbols))]\ncurrent_price= [0 for x in range(len(symbols))]\nprice_chance_2_min = [0 for x in range(len(symbols))]\nprice_chance_5_min = [0 for x in range(len(symbols))]\nprice_chance_15_min = [0 for x in range(len(symbols))]\nprice_chance_30_min = [0 for x in range(len(symbols))]\nprice_change_25_30_min = [0 for x in range(len(symbols))]\nprice_chance_1_hour = [0 for x in range(len(symbols))]\nprice_chance_3_hour = [0 for x in range(len(symbols))]\nprice_chance_8_hour = [0 for x in range(len(symbols))]\nprice_change_1_days = [0 for x in range(len(symbols))]\nprice_change_3_days = [0 for x in range(len(symbols))]\nprice_change_5_days = [0 for x in range(len(symbols))]\nprice_change_7_days = [0 for x in range(len(symbols))]\nprice_change_10_days = [0 for x in range(len(symbols))]\naverage_10_min = [0 for x in range(len(symbols))]\naverage_20_min = [0 for x in range(len(symbols))]\naverage_50_min = [0 for x in range(len(symbols))]\naverage_100_min = [0 for x in range(len(symbols))]\naverage_change_10_min = [0 for x in range(len(symbols))]\naverage_change_20_min = [0 for x in range(len(symbols))]\naverage_change_50_min = [0 for x in range(len(symbols))]\naverage_change_100_min = [0 for x in range(len(symbols))]\ntotal_score = [0 for x in range(len(symbols))]\nratio5=[0 for x in range(len(symbols))]\nratio5_10sec=[[] for y in range(len(symbols))]\nratio5_sum = [0 for x in range(len(symbols))]\nratio5_sum_10sec = [[] for y in range(len(symbols))]\nratio20= [0 for x in range(len(symbols))]\n\n# Create list neccessary for depth socked\nlist=[]\nfor x in symbols:\n list.append(x.lower()+'@depth20') # append @depth20 to each symbol and add it into list\n\nbm = BinanceSocketManager(client)\nbm.start()\ndepth_socket = bm.start_multiplex_socket(list,process_depth) # start depth socket\nticker_socket = bm.start_ticker_socket(process_ticker) # start price socket\n\n# maintain candlestick lists\ndef kline_continuum():\n i=0\n while True:\n time.sleep(60)\n for x in range(len(symbols)):\n k_line_1m[x].pop(0)\n k_line_1m[x].append(current_price[x]) # add price to list of 1 minute candlestick every 1 minute\n if i%15==0:\n k_line_15m[x].pop(0)\n k_line_15m[x].append(current_price[x]) # add price to list of 15 minute candlestick every 15 minute\n i+=1\n\n\n# Save report between ask and bit for the last 10 seconds\ndef report_10_seconds():\n while True:\n for x in range(len(symbols)):\n if len(ratio5_10sec[x])>10:\n ratio5_10sec[x].pop(0)\n if len(ratio5_sum_10sec[x]) > 10:\n ratio5_sum_10sec[x].pop(0)\n ratio5_10sec[x].append(ratio5[x])\n ratio5_sum_10sec[x].append(ratio5_sum[x])\n time.sleep(1)\n\n\n# Calculate score for each symbol, you can add as many parameters as you want\ndef calculate_score():\n for x in range(len(symbols)):\n score = 0\n\n # 2 minute change parameter score calculation\n a = float(price_chance_2_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n score += 1.25\n elif a >= 1 and a < 1.5:\n score += 1.5\n elif a >= 1.5 and a < 2:\n score += 0.5\n elif a >= 3:\n score += 0.25\n\n # 5 minute change parameter score calculation\n a = float(price_chance_5_min[x])\n if a > 0 and a < 0.5:\n score += 1\n elif a >= 0.5 and a < 1:\n 
score += 1.25\n elif a >= 1 and a < 2:\n score += 1.5\n elif a >= 2 and a < 3:\n score += 0.5\n elif a >= 3:\n score += 0.25\n\n # 15 minute change parameter score calculation\n a = float(price_chance_15_min[x])\n if a <= 1 and a > -0.5:\n score += 0.25\n elif a <= -0.5 and a > -1:\n score += 0.5\n elif a <= -1 and a > -1.5:\n score += 0.75\n elif a <= -1.5:\n score += 1\n\n # change between 25 and 30 minutes ago parameter score calculation\n a = float(price_change_25_30_min[x])\n if a <= 2 and a > -0.75:\n score += 0.25\n elif a <= -0.75 and a > -1.25:\n score += 0.5\n elif a <= -1.25 and a > -1.75:\n score += 0.75\n elif a <= -1.75:\n score += 1\n\n # 1 hour change parameter score calculation\n a = float(price_chance_1_hour[x])\n if a <= 2 and a >= 0:\n score += 0.5\n elif a <= 0 and a > -2:\n score += 0.75\n elif a <= -2:\n score += 1\n\n # 3 hour change parameter score calculation\n a = float(price_chance_3_hour[x])\n if a <= 5 and a > -1:\n score += 0.25\n elif a <= -1 and a > -3:\n score += 0.5\n elif a <= -3 and a > -6:\n score += 0.75\n elif a <= -6:\n score += 1\n\n # 8 hour change parameter score calculation\n a = float(price_chance_8_hour[x])\n if a <= 0 and a > -4:\n score += 0.25\n elif a <= -4 and a > -6:\n score += 0.5\n elif a <= -6:\n score += 0.75\n\n\n\n if float(ratio5[x]) > 0:\n score += 1\n\n\n a = 0\n for i in range(len(ratio5_10sec[x])):\n if float(price_chance_2_min[x]) > 0.55 or float(price_chance_5_min[x]) > 1:\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if float(ratio5_sum_10sec[x][i]) > 0.3:\n a += 1\n score += a / len(ratio5_sum_10sec[x])\n\n\n if float(ratio20[x]) > 0:\n score += 1\n\n a = 0\n for i in range(len(ratio5_10sec[x])-1):\n if float(ratio5_10sec[x][i]) > 0:\n a += 1\n if a <= 2:\n score += 0.25\n elif a > 2 and a <= 4:\n score += 0.5\n elif a > 4 and a <= 7:\n score += 0.75\n elif a > 7:\n score += 1\n\n a = 0\n for i in range(20, 1, -1):\n if float(k_line_1m[x][-i]) > float(k_line_1m[x][-(i - 1)]):\n a += 1\n score += a / 10\n\n # 1 day change parameter score calculation\n if float(price_change_1_days[x]) > 5:\n score+=0.3\n # 3 day change parameter score calculation\n if float(price_change_3_days[x]) > 10:\n score += 0.25\n # 5 day change parameter score calculation\n if float(price_change_5_days[x]) > 15:\n score += 0.25\n # 7 day change parameter score calculation\n if float(price_change_7_days[x]) > 20:\n score += 0.25\n # 10 day change parameter score calculation\n if float(price_change_10_days[x]) > -25:\n score += 0.25\n\n # 10 minutes moving average parameter score calculation\n a=float(average_change_10_min[x])\n if a<0.2 and a>-0.3:\n score+=0.1\n # 20 minutes moving average parameter score calculation\n a = float(average_change_20_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n # 50 minutes moving average parameter score calculation\n a = float(average_change_50_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n # 100 minutes moving average parameter score calculation\n a = float(average_change_100_min[x])\n if a < 0.2 and a > -0.3:\n score += 0.1\n\n # save score\n total_score[x] = score\n\n\ndef print_results():\n # sleep time before starting calculations\n time.sleep(10)\n\n while True:\n for x in range(len(symbols)):\n # calculate parameters percentages\n try:\n price_chance_2_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 2]) - 100, 2)\n price_chance_5_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 5]) - 100, 2)\n price_chance_15_min[x] = round(float(current_price[x]) * 100 / 
float(k_line_1m[x][- 15]) - 100, 2)\n price_chance_30_min[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)\n price_chance_1_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 60]) - 100, 2)\n price_chance_3_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][- 180]) - 100, 2)\n price_chance_8_hour[x] = round(float(current_price[x]) * 100 / float(k_line_1m[x][20]) - 100, 2)\n price_change_25_30_min[x] = round(float(k_line_1m[x][- 6]) * 100 / float(k_line_1m[x][- 30]) - 100, 2)\n price_change_1_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 96]) - 100, 1)\n price_change_3_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 288]) - 100, 1)\n price_change_5_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 480] )- 100, 1)\n price_change_7_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 672]) - 100, 1)\n price_change_10_days[x] = round(float(current_price[x]) * 100 / float(k_line_15m[x][- 960]) - 100, 1)\n average_10_min[x] = round(float(sum(k_line_1m[x][- 10:])) / 10, 8)\n average_20_min[x] = round(float(sum(k_line_1m[x][- 20:])) / 20, 8)\n average_50_min[x] = round(float(sum(k_line_1m[x][- 50:])) / 50, 8)\n average_100_min[x] = round(float(sum(k_line_1m[x][- 100:])) / 100, 8)\n average_change_10_min[x] = round(float(current_price[x]) * 100 / float(average_10_min[x]) - 100, 2)\n average_change_20_min[x] = round(float(current_price[x]) * 100 / float(average_20_min[x]) - 100, 2)\n average_change_50_min[x] = round(float(current_price[x]) * 100 / float(average_50_min[x]) - 100, 2)\n average_change_100_min[x] = round(float(current_price[x]) * 100 / float(average_100_min[x]) - 100, 2)\n except Exception as e:\n print(e)\n\n\n # call function for score calculation\n calculate_score()\n\n # select parameter for which data is sorted\n sort_by = total_score\n\n # sort data\n sorted_data = sorted(range(len(sort_by)), key=lambda k: sort_by[k])\n # sort data in reverse order\n sorted_data.reverse()\n\n #print table header\n print (time.ctime())\n print ('%5s %5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (\n 'Symbol', 'score', 'r5', 'r20', '2m_ch', '5m_ch', '15m_ch', '30m_ch', '1h_ch', '10MA', '20MA', '50MA', '100MA', '8h_ch',\n '25-30m', 'r5sum', '1d_ch', '3d_ch','5d_ch', '7d_ch', '10d_ch'))\n\n # print top 10 cryptocurrencies data\n for k in range(10):\n i = sorted_data[k]\n print ('%5s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %5s %6s %6s %6s %6s %6s' % (\n symbols[i][:-3], total_score[i], ratio5[i], ratio20[i], price_chance_2_min[i], price_chance_5_min[i],\n price_chance_15_min[i],price_chance_30_min[i], price_chance_1_hour[i], average_change_10_min[i],\n average_change_20_min[i],average_change_50_min[i], average_change_100_min[i], price_chance_8_hour[i],\n price_change_25_30_min[i], ratio5_sum[i], price_change_1_days[i], price_change_3_days[i],\n price_change_5_days[i], price_change_7_days[i], price_change_10_days[i]))\n\n # if score for one coin is > 10 will play sound\n try:\n if float(total_score[sorted_data[0]]) > 10:\n winsound.PlaySound('\\\\Sound.wav', winsound.SND_FILENAME)\n except Exception as e:\n print(e)\n\n # Seconds to wait before repeating while loop\n time.sleep(1)\n\n# Declaring threads\nthreads = [threading.Thread(target=kline_continuum),\n threading.Thread(target=report_10_seconds),\n threading.Thread(target=print_results)]\n# Starting threads\n[thread.start() for thread in 
threads]\n[thread.join() for thread in threads]\n\n\n",
"step-ids": [
5,
8,
9,
11,
12
]
}
|
[
5,
8,
9,
11,
12
] |
#!/usr/bin/python
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#==========================================================================*/
# This script is used to automate the modularization process. The following
# steps are included:
# 1. Move the files in the monolithic ITK into modules of the modularized ITK.
# A manifest text file that lists all the files and their destinations is
# required to run the script. By default, the manifest file is named
# "Manifest.txt" and lives in the same directory as this script.
# 2. Create CMake Files and put them into modules.
# Modified by Guillaume Pasero <[email protected]>
# add dependencies in otb-module.cmake
# To run it, type ./modulizer.py OTB_PATH OUTPUT_DIR Manifest_PATH
# from the otb-modulizer root directory.
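#
# Note on the manifest format (illustrative summary, not part of the original
# script): each line is expected to carry six separator-delimited fields
# (comma, semicolon or tab, auto-detected below), in the order
#   source_path, current_subDir, group, module, subDir, comment
# A hypothetical example line could look like:
#   Code/Common/otbImage.h,Code/Common,Core,Common,include,moved as-is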
print "*************************************************************************"
print "WARNING! This modularization script is still in its experimental stage."
print "Current OTB users should not run this script."
print "*************************************************************************"
import shutil
import os.path as op
import re
import sys
import os
import stat
import glob
import documentationCheck
import analyseAppManifest
import dispatchTests
import dispatchExamples
from subprocess import call
def parseFullManifest(path):
sourceList = []
nbFields = 6
fd = open(path,'rb')
    # read the first line to detect the separator, then rewind
firstLine = fd.readline()
sep = ','
if (len(firstLine.split(sep)) != nbFields):
sep = ';'
if (len(firstLine.split(sep)) != nbFields):
sep = '\t'
if (len(firstLine.split(sep)) != nbFields):
print "Unknown separator"
return sourceList
fd.seek(0)
# parse file
for line in fd:
if (line.strip()).startswith("#"):
continue
words = line.split(sep)
if (len(words) < (nbFields-1)):
print "Wrong number of fields, skipping this line"
continue
fullPath = words[0].strip(" ,;\t\n\r")
groupName = words[2].strip(" ,;\t\n\r")
moduleName = words[3].strip(" ,;\t\n\r")
subDir = words[4].strip(" ,;\t\n\r")
sourceName = op.basename(fullPath)
sourceList.append({"path":fullPath, "group":groupName, "module":moduleName, "subDir":subDir})
fd.close()
return sourceList
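# Note on the description file read by parseDescriptions (illustrative):
# one module per line, two '|'-separated fields, the module name then a
# quoted description. A hypothetical example line:
#   Common | "Base classes and utilities shared by the other modules"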
def parseDescriptions(path):
output = {}
sep = '|'
nbFields = 2
fd = open(path,'rb')
for line in fd:
if (line.strip()).startswith("#"):
continue
words = line.split(sep)
if len(words) != nbFields:
continue
moduleName = words[0].strip(" \"\t\n\r")
description = words[1].strip(" \"\t\n\r")
output[moduleName] = description
fd.close()
return output
if len(sys.argv) < 4:
print("USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]".format(sys.argv[0]))
print(" monolithic_OTB_PATH : checkout of OTB repository (will not be modified)")
print(" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created ")
print(" Manifest_Path : path to manifest file, in CSV-like format. Fields are :")
print(" source_path/current_subDir/group/module/subDir/comment")
print(" module_dep : dependencies between modules")
print(" test_dep : additional dependencies for tests")
print(" mod_description : description for each module")
print(" migration_password : password to enable MIGRATION")
sys.exit(-1)
scriptDir = op.dirname(op.abspath(sys.argv[0]))
HeadOfOTBTree = sys.argv[1]
if (HeadOfOTBTree[-1] == '/'):
HeadOfOTBTree = HeadOfOTBTree[0:-1]
OutputDir = sys.argv[2]
HeadOfModularOTBTree = op.join(OutputDir,"OTB_Modular")
ManifestPath = sys.argv[3]
EdgePath = ""
if len(sys.argv) >= 5:
EdgePath = sys.argv[4]
testDependPath = ""
if len(sys.argv) >= 6:
testDependPath = sys.argv[5]
modDescriptionPath = ""
if len(sys.argv) >= 7:
modDescriptionPath = sys.argv[6]
enableMigration = False
if len(sys.argv) >= 8:
migrationPass = sys.argv[7]
if migrationPass == "redbutton":
enableMigration = True
# copy the whole OTB tree over to a temporary dir
HeadOfTempTree = op.join(OutputDir,"OTB_remaining")
if op.isdir(HeadOfTempTree):
shutil.rmtree(HeadOfTempTree)
if op.isdir(HeadOfModularOTBTree):
shutil.rmtree(HeadOfModularOTBTree)
print("Start to copy " + HeadOfOTBTree + " to ./OTB_remaining ...")
shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*'))
print("Done copying!")
# checkout OTB-Modular
cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree
os.system(cmd)
logDir = op.join(OutputDir,"logs")
if not op.isdir(logDir):
os.makedirs(logDir)
# read the manifest file
print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree))
numOfMissingFiles = 0;
missingf = open(op.join(logDir,'missingFiles.log'),'w')
moduleList=[]
moduleDic={}
sourceList = parseFullManifest(ManifestPath)
for source in sourceList:
# build module list
moduleDic[source["module"]] = source["group"]
# create the path
inputfile = op.abspath(op.join(HeadOfTempTree,source["path"]))
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
if not op.isdir(outputPath):
os.makedirs(outputPath)
# copying files to the destination
if op.isfile(inputfile):
if op.isfile(op.join(outputPath,op.basename(inputfile))):
os.remove(op.join(outputPath,op.basename(inputfile)))
shutil.move(inputfile, outputPath)
else:
missingf.write(inputfile+'\n')
numOfMissingFiles = numOfMissingFiles + 1
missingf.close()
print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles)
moduleList = moduleDic.keys()
# after move, operate a documentation check
for source in sourceList:
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
outputFile = op.join(outputPath,op.basename(source["path"]))
if op.isfile(outputFile):
if op.splitext(outputFile)[1] == ".h":
nextContent = documentationCheck.parserHeader(outputFile,source["module"])
fd = open(outputFile,'wb')
fd.writelines(nextContent)
fd.close()
# get dependencies (if file is present)
dependencies = {}
testDependencies = {}
exDependencies = {}
for mod in moduleList:
dependencies[mod] = []
testDependencies[mod] = []
exDependencies[mod] = []
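# Dependency files (module_dep / test_dep) are read as one edge per line in
# the form "From,To". A hypothetical example line "ImageBase,Common" would
# record that module ImageBase depends on module Common.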
if op.isfile(EdgePath):
fd = open(EdgePath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if dependencies.has_key(depFrom):
dependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
if op.isfile(testDependPath):
fd = open(testDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if testDependencies.has_key(depFrom):
testDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
if op.isfile(exDependPath):
fd = open(exDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if exDependencies.has_key(depFrom):
exDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
modDescriptions = {}
if op.isfile(modDescriptionPath):
modDescriptions = parseDescriptions(modDescriptionPath)
# list the new files
newf = open(op.join(logDir,'newFiles.log'),'w')
for (root, subDirs, files) in os.walk(HeadOfTempTree):
for afile in files:
newf.write(op.join(root, afile)+'\n')
newf.close()
print ("listed new files to logs/newFiles.log")
###########################################################################
print ('creating cmake files for each module (from the template module)')
#moduleList = os.listdir(HeadOfModularOTBTree)
for moduleName in moduleList:
moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName))
cmakeModName = "OTB"+moduleName
if op.isdir(moduleDir):
# write CMakeLists.txt
filepath = moduleDir+'/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
if op.isdir(moduleDir+'/src'):
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt')
else:
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt')
for line in open(template_cmakelist,'r'):
line = line.replace('otb-template-module',cmakeModName)
o.write(line);
o.close()
# write src/CMakeLists.txt
# list of CXX files
if op.isdir(moduleDir+'/src'):
cxxFiles = glob.glob(moduleDir+'/src/*.cxx')
cxxFileList='';
for cxxf in cxxFiles:
cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\n'
# build list of link dependencies
linkLibs = ""
for dep in dependencies[moduleName]:
                # verify if dep is a header-only module
depThirdParty = False
try:
moduleDic[dep]
except KeyError:
# this is a ThirdParty module
depThirdParty = True
if not depThirdParty:
depModuleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[dep],dep))
depcxx = glob.glob(depModuleDir+'/src/*.cxx')
if depcxx :
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
else:
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
if len(linkLibs) == 0:
linkLibs = " ${OTBITK_LIBRARIES}"
filepath = moduleDir+'/src/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open(op.join(scriptDir,'templateModule/otb-template-module/src/CMakeLists.txt'),'r'):
line = line.replace('otb-template-module',cmakeModName)
line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n
line = line.replace('LINK_LIBRARIES_TO_BE_REPLACED',linkLibs)
o.write(line);
o.close()
# write app/CMakeLists.txt
if op.isdir(moduleDir+'/app'):
os.mkdir(moduleDir+'/test')
srcFiles = glob.glob(moduleDir+'/app/*.cxx')
srcFiles += glob.glob(moduleDir+'/app/*.h')
appList = {}
for srcf in srcFiles:
# get App name
appName = analyseAppManifest.findApplicationName(srcf)
if len(appName) == 0:
continue
appList[appName] = {"source":op.basename(srcf)}
# get original location
cmakeListPath = ""
for item in sourceList:
if op.basename(item["path"]) == op.basename(srcf) and \
moduleName == item["module"]:
appDir = op.basename(op.dirname(item["path"]))
cmakeListPath = op.join(HeadOfOTBTree,op.join("Testing/Applications"),op.join(appDir,"CMakeLists.txt"))
break
# get App tests
if not op.isfile(cmakeListPath):
continue
appList[appName]["test"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName)
# build list of link dependencies
linkLibs = ""
for dep in dependencies[moduleName]:
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
filepath = moduleDir+'/app/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
# define link libraries
o.write("set("+cmakeModName+"_LINK_LIBS\n")
o.write(linkLibs)
o.write(")\n")
for appli in appList:
content = "\notb_create_application(\n"
content += " NAME " + appli + "\n"
content += " SOURCES " + appList[appli]["source"] + "\n"
content += " LINK_LIBRARIES ${${otb-module}_LIBRARIES})\n"
o.write(content)
o.close()
filepath = moduleDir+'/test/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
o.write("otb_module_test()")
for appli in appList:
if not appList[appli].has_key("test"):
continue
o.write("\n#----------- "+appli+" TESTS ----------------\n")
for test in appList[appli]["test"]:
if test.count("${"):
print "Warning : test name contains a variable : "+test
continue
testcode=appList[appli]["test"][test]
testcode=[s.replace('OTB_TEST_APPLICATION', 'otb_test_application') for s in testcode]
o.writelines(testcode)
o.write("\n")
o.close()
# write test/CMakeLists.txt : done by dispatchTests.py
"""
if op.isdir(moduleDir+'/test'):
cxxFiles = glob.glob(moduleDir+'/test/*.cxx')
cxxFileList='';
for cxxf in cxxFiles:
cxxFileList = cxxFileList+cxxf.split('/')[-1]+'\n'
filepath = moduleDir+'/test/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open('./templateModule/otb-template-module/test/CMakeLists.txt','r'):
# TODO : refactor for OTB
words= moduleName.split('-')
moduleNameMod='';
for word in words:
moduleNameMod=moduleNameMod + word.capitalize()
line = line.replace('itkTemplateModule',moduleNameMod)
line = line.replace('itk-template-module',moduleName)
line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n
o.write(line);
o.close()
"""
# write otb-module.cmake, which contains dependency info
filepath = moduleDir+'/otb-module.cmake'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open(op.join(scriptDir,'templateModule/otb-template-module/otb-module.cmake'),'r'):
# replace documentation
if line.find("DESCRIPTION_TO_BE_REPLACED") >= 0:
docString = "\"TBD\""
if moduleName in modDescriptions:
descPos = line.find("DESCRIPTION_TO_BE_REPLACED")
limitChar = 80
docString = "\""+modDescriptions[moduleName]+"\""
curPos = 80 - descPos
while curPos < len(docString):
lastSpace = docString[0:curPos].rfind(' ')
if lastSpace > max(0,curPos-80):
docString = docString[0:lastSpace] + '\n' + docString[lastSpace+1:]
else:
docString = docString[0:curPos] + '\n' + docString[curPos:]
curPos += 81
line = line.replace('DESCRIPTION_TO_BE_REPLACED',docString)
# replace module name
line = line.replace('otb-template-module',cmakeModName)
# replace depend list
dependTagPos = line.find("DEPENDS_TO_BE_REPLACED")
if dependTagPos >= 0:
replacementStr = "DEPENDS"
indentStr = ""
for it in range(dependTagPos+2):
indentStr = indentStr + " "
if len(dependencies[moduleName]) > 0:
deplist = dependencies[moduleName]
else:
deplist = ["Common"]
for dep in deplist:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('DEPENDS_TO_BE_REPLACED',replacementStr)
# replace test_depend list
testDependTagPos = line.find("TESTDEP_TO_BE_REPLACED")
if testDependTagPos >= 0:
if moduleName.startswith("App"):
# for application : hardcode TestKernel and CommandLine
indentStr = ""
for it in range(testDependTagPos+2):
indentStr = indentStr + " "
replacementStr = "TEST_DEPENDS\n" + indentStr + "OTBTestKernel\n" + indentStr + "OTBCommandLine"
line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr)
else:
# standard case
if len(testDependencies[moduleName]) > 0:
indentStr = ""
replacementStr = "TEST_DEPENDS"
for it in range(testDependTagPos+2):
indentStr = indentStr + " "
for dep in testDependencies[moduleName]:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr)
else:
line = line.replace('TESTDEP_TO_BE_REPLACED','')
# replace example_depend list
exDependTagPos = line.find("EXDEP_TO_BE_REPLACED")
if exDependTagPos >= 0:
if len(exDependencies[moduleName]) > 0:
indentStr = ""
replacementStr = "EXAMPLE_DEPENDS"
for it in range(exDependTagPos+2):
indentStr = indentStr + " "
for dep in exDependencies[moduleName]:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('EXDEP_TO_BE_REPLACED',replacementStr)
else:
line = line.replace('EXDEP_TO_BE_REPLACED','')
o.write(line);
o.close()
# call dispatchTests to fill test/CMakeLists
if op.isfile(testDependPath):
dispatchTests.main(["dispatchTests.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,testDependPath])
"""
# call dispatchExamples to fill example/CMakeLists
if op.isfile(exDependPath):
dispatchExamples.main(["dispatchExamples.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,exDependPath])
"""
# examples
for i in sorted(os.listdir(HeadOfTempTree + "/Examples")):
if i == "CMakeLists.txt" or i == "README.txt" or i.startswith("DataRepresentation"):
continue
for j in sorted(os.listdir(HeadOfTempTree + "/Examples/" + i)):
if j == "CMakeLists.txt" or j.startswith("otb"):
continue
command = "mv %s/Examples/%s/%s %s/Examples/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j)
os.system(command)
for i in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation")):
if i == "CMakeLists.txt" or i == "README.txt":
continue
for j in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation/" + i)):
if j == "CMakeLists.txt" or j.startswith("otb"):
continue
command = "mv %s/Examples/DataRepresentation/%s/%s %s/Examples/DataRepresentation/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j)
os.system(command)
# save version without patches (so that we can regenerate patches later)
os.system( "cp -ar " + op.join(OutputDir,"OTB_Modular") + " " + op.join(OutputDir,"OTB_Modular-nopatch") )
# apply patches in OTB_Modular
curdir = op.abspath(op.dirname(__file__))
command = "cd " + op.join(OutputDir,"OTB_Modular") + " && patch -p1 < " + curdir + "/patches/otbmodular.patch"
print "Executing " + command
os.system( command )
# remove Copyright files we don't want to touch later
os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"Copyright") ) )
os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"RELEASE_NOTES.txt") ) )
os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"README") ) )
# PREPARE MIGRATION COMMIT ON A CLONE OF ORIGINAL CHECKOUT
if enableMigration:
print("Executing migration on a clone of original checkout")
HeadOfTempTree = op.abspath(HeadOfTempTree)
OutputDir = op.abspath(OutputDir)
# clone original checkout
outputModular = op.join(OutputDir,"OTB_Modular")
outputMigration = op.join(OutputDir,"OTB_Migration")
if op.exists(outputMigration):
os.removedirs(outputMigration)
command = ["cp","-ar",HeadOfOTBTree,outputMigration]
call(command)
os.chdir(outputMigration)
# walk through OTB_Remaining and delete corresponding files in OTB checkout
print("DELETE STEP...")
for dirPath, dirNames, fileNames in os.walk(HeadOfTempTree):
currentSourceDir = dirPath.replace(HeadOfTempTree,'.')
for fileName in fileNames:
if op.exists(op.join(currentSourceDir,fileName)):
command = ["hg","remove",op.join(currentSourceDir,fileName)]
call(command)
else:
print("Unknown file : "+op.join(currentSourceDir,fileName))
command = ['hg','commit','-m','ENH: Remove files not necessary after modularization']
call(command)
# walk through manifest and rename files
print("MOVE STEP...")
for source in sourceList:
outputPath = op.join("./Modules",op.join(source["group"],op.join(source["module"],source["subDir"])))
command = ['hg','rename',source["path"],op.join(outputPath,op.basename(source["path"]))]
call(command)
command = ['hg','commit','-m','ENH: Move source and test files into their respective module']
call(command)
# add new files from OTB_Modular (files from OTB-Modular repo + generated files)
print("ADD STEP...")
for dirPath, dirNames, fileNames in os.walk(outputModular):
currentSourceDir = dirPath.replace(outputModular,'.')
if currentSourceDir.startswith("./.hg"):
print("skip .hg")
continue
for fileName in fileNames:
# skip hg files
if fileName.startswith(".hg"):
continue
targetFile = op.join(currentSourceDir,fileName)
if not op.exists(targetFile):
if not op.exists(currentSourceDir):
command = ["mkdir","-p",currentSourceDir]
call(command)
shutil.copy(op.join(dirPath,fileName),targetFile)
command = ['hg','add']
call(command)
command = ['hg','commit','-m','ENH: Add new files for modular build system']
call(command)
# apply patches on OTB Checkout
print("PATCH STEP...")
for dirPath, dirNames, fileNames in os.walk(outputModular):
currentSourceDir = dirPath.replace(outputModular,'.')
if currentSourceDir.startswith("./.hg"):
continue
for fileName in fileNames:
# skip hg files
if fileName.startswith(".hg"):
continue
targetFile = op.join(currentSourceDir,fileName)
if op.exists(targetFile):
command = ['cp',op.join(dirPath,fileName),targetFile]
call(command)
command = ['hg','commit','-m','ENH: Apply patches necessary after modularization']
call(command)
|
normal
|
{
"blob_id": "4f87c2602e3233889888e419296f67fe40a2db0f",
"index": 5886,
"step-1": "#!/usr/bin/python\n#==========================================================================\n#\n# Copyright Insight Software Consortium\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0.txt\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n#==========================================================================*/\n# This script is used to automate the modularization process. The following\n# steps are included:\n# 1. Move the files in the monolithic ITK into modules of the modularized ITK.\n# A manifest text file that lists all the files and their destinations is\n# required to run the script.By default, the manifest file is named as\n# \"Manifest.txt\" in the same directory of this script.\n# 2. Create CMake Files and put them into modules.\n\n# Modified by Guillaume Pasero <[email protected]>\n# add dependencies in otb-module.cmake\n\n# To run it, type ./modulizer.py OTB_PATH Manifest_PATH\n# from the otb-modulizer root directory.\n\nprint \"*************************************************************************\"\nprint \"WARNINGs! This modularization script is still in its experimental stage.\"\nprint \"Current OTB users should not run this script.\"\nprint \"*************************************************************************\"\n\n\nimport shutil\nimport os.path as op\nimport re\nimport sys\nimport os\nimport stat\nimport glob\nimport documentationCheck\nimport analyseAppManifest\nimport dispatchTests\nimport dispatchExamples\nfrom subprocess import call\n\ndef parseFullManifest(path):\n sourceList = []\n nbFields = 6\n fd = open(path,'rb')\n # skip first line and detect separator\n firstLine = fd.readline()\n sep = ','\n if (len(firstLine.split(sep)) != nbFields):\n sep = ';'\n if (len(firstLine.split(sep)) != nbFields):\n sep = '\\t'\n if (len(firstLine.split(sep)) != nbFields):\n print \"Unknown separator\"\n return sourceList\n \n fd.seek(0)\n \n # parse file\n for line in fd:\n if (line.strip()).startswith(\"#\"):\n continue\n words = line.split(sep)\n if (len(words) < (nbFields-1)):\n print \"Wrong number of fields, skipping this line\"\n continue\n fullPath = words[0].strip(\" ,;\\t\\n\\r\")\n groupName = words[2].strip(\" ,;\\t\\n\\r\")\n moduleName = words[3].strip(\" ,;\\t\\n\\r\")\n subDir = words[4].strip(\" ,;\\t\\n\\r\")\n sourceName = op.basename(fullPath)\n \n sourceList.append({\"path\":fullPath, \"group\":groupName, \"module\":moduleName, \"subDir\":subDir})\n fd.close()\n \n return sourceList\n\n\ndef parseDescriptions(path):\n output = {}\n sep = '|'\n nbFields = 2\n fd = open(path,'rb')\n for line in fd:\n if (line.strip()).startswith(\"#\"):\n continue\n words = line.split(sep)\n if len(words) != nbFields:\n continue\n moduleName = words[0].strip(\" \\\"\\t\\n\\r\")\n description = words[1].strip(\" \\\"\\t\\n\\r\")\n output[moduleName] = description\n fd.close()\n \n return output\n\n\nif len(sys.argv) < 4:\n print(\"USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]\".format(sys.argv[0]))\n print(\" monolithic_OTB_PATH : checkout of 
OTB repository (will not be modified)\")\n print(\" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created \")\n print(\" Manifest_Path : path to manifest file, in CSV-like format. Fields are :\")\n print(\" source_path/current_subDir/group/module/subDir/comment\")\n print(\" module_dep : dependencies between modules\")\n print(\" test_dep : additional dependencies for tests\")\n print(\" mod_description : description for each module\")\n print(\" migration_password : password to enable MIGRATION\")\n sys.exit(-1)\n\nscriptDir = op.dirname(op.abspath(sys.argv[0]))\n\nHeadOfOTBTree = sys.argv[1]\nif (HeadOfOTBTree[-1] == '/'):\n HeadOfOTBTree = HeadOfOTBTree[0:-1]\n\nOutputDir = sys.argv[2]\nHeadOfModularOTBTree = op.join(OutputDir,\"OTB_Modular\")\n\nManifestPath = sys.argv[3]\n\nEdgePath = \"\"\nif len(sys.argv) >= 5:\n EdgePath = sys.argv[4]\n \ntestDependPath = \"\"\nif len(sys.argv) >= 6:\n testDependPath = sys.argv[5]\n\nmodDescriptionPath = \"\"\nif len(sys.argv) >= 7:\n modDescriptionPath = sys.argv[6]\n\nenableMigration = False\nif len(sys.argv) >= 8:\n migrationPass = sys.argv[7]\n if migrationPass == \"redbutton\":\n enableMigration = True\n\n# copy the whole OTB tree over to a temporary dir\nHeadOfTempTree = op.join(OutputDir,\"OTB_remaining\")\n\nif op.isdir(HeadOfTempTree):\n shutil.rmtree(HeadOfTempTree)\n\nif op.isdir(HeadOfModularOTBTree):\n shutil.rmtree(HeadOfModularOTBTree)\n\nprint(\"Start to copy\" + HeadOfOTBTree + \" to ./OTB_remaining ...\")\nshutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*'))\nprint(\"Done copying!\")\n\n# checkout OTB-Modular\ncmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree\nos.system(cmd)\n\nlogDir = op.join(OutputDir,\"logs\")\nif not op.isdir(logDir):\n os.makedirs(logDir)\n\n# read the manifest file\nprint (\"moving files from ./OTB_remaining into modules in {0}\".format(HeadOfModularOTBTree))\nnumOfMissingFiles = 0;\nmissingf = open(op.join(logDir,'missingFiles.log'),'w')\nmoduleList=[]\nmoduleDic={}\nsourceList = parseFullManifest(ManifestPath)\n\nfor source in sourceList:\n # build module list\n moduleDic[source[\"module\"]] = source[\"group\"]\n \n # create the path\n inputfile = op.abspath(op.join(HeadOfTempTree,source[\"path\"]))\n outputPath = op.join(op.join(HeadOfModularOTBTree,\"Modules\"),op.join(source[\"group\"],op.join(source[\"module\"],source[\"subDir\"])))\n if not op.isdir(outputPath):\n os.makedirs(outputPath)\n \n # copying files to the destination\n if op.isfile(inputfile):\n if op.isfile(op.join(outputPath,op.basename(inputfile))):\n os.remove(op.join(outputPath,op.basename(inputfile)))\n shutil.move(inputfile, outputPath)\n else:\n missingf.write(inputfile+'\\n')\n numOfMissingFiles = numOfMissingFiles + 1\n\nmissingf.close()\nprint (\"listed {0} missing files to logs/missingFiles.log\").format(numOfMissingFiles)\n\nmoduleList = moduleDic.keys()\n\n# after move, operate a documentation check\nfor source in sourceList:\n outputPath = op.join(op.join(HeadOfModularOTBTree,\"Modules\"),op.join(source[\"group\"],op.join(source[\"module\"],source[\"subDir\"])))\n outputFile = op.join(outputPath,op.basename(source[\"path\"]))\n if op.isfile(outputFile):\n if op.splitext(outputFile)[1] == \".h\":\n nextContent = documentationCheck.parserHeader(outputFile,source[\"module\"])\n fd = open(outputFile,'wb')\n fd.writelines(nextContent)\n fd.close()\n\n\n# get dependencies (if file is present)\ndependencies = {}\ntestDependencies = 
{}\nexDependencies = {}\nfor mod in moduleList:\n dependencies[mod] = []\n testDependencies[mod] = []\n exDependencies[mod] = []\n\nif op.isfile(EdgePath):\n fd = open(EdgePath,'rb')\n for line in fd:\n words = line.split(',')\n if len(words) == 2:\n depFrom = words[0].strip(\" ,;\\t\\n\\r\")\n depTo = words[1].strip(\" ,;\\t\\n\\r\")\n if dependencies.has_key(depFrom):\n dependencies[depFrom].append(depTo)\n else:\n print(\"Bad dependency : \"+depFrom+\" -> \"+depTo)\n fd.close()\n\nif op.isfile(testDependPath):\n fd = open(testDependPath,'rb')\n for line in fd:\n words = line.split(',')\n if len(words) == 2:\n depFrom = words[0].strip(\" ,;\\t\\n\\r\")\n depTo = words[1].strip(\" ,;\\t\\n\\r\")\n if testDependencies.has_key(depFrom):\n testDependencies[depFrom].append(depTo)\n else:\n print(\"Bad dependency : \"+depFrom+\" -> \"+depTo)\n fd.close()\n\n\"\"\"\nif op.isfile(exDependPath):\n fd = open(exDependPath,'rb')\n for line in fd:\n words = line.split(',')\n if len(words) == 2:\n depFrom = words[0].strip(\" ,;\\t\\n\\r\")\n depTo = words[1].strip(\" ,;\\t\\n\\r\")\n if exDependencies.has_key(depFrom):\n exDependencies[depFrom].append(depTo)\n else:\n print(\"Bad dependency : \"+depFrom+\" -> \"+depTo)\n fd.close()\n\"\"\"\nmodDescriptions = {}\nif op.isfile(modDescriptionPath):\n modDescriptions = parseDescriptions(modDescriptionPath)\n\n\n\n# list the new files\nnewf = open(op.join(logDir,'newFiles.log'),'w')\nfor (root, subDirs, files) in os.walk(HeadOfTempTree):\n for afile in files:\n newf.write(op.join(root, afile)+'\\n')\nnewf.close()\nprint (\"listed new files to logs/newFiles.log\")\n\n###########################################################################\n\nprint ('creating cmake files for each module (from the template module)')\n#moduleList = os.listdir(HeadOfModularOTBTree)\nfor moduleName in moduleList:\n moduleDir = op.join(op.join(HeadOfModularOTBTree,\"Modules\"),op.join(moduleDic[moduleName],moduleName))\n cmakeModName = \"OTB\"+moduleName\n \n if op.isdir(moduleDir):\n \n # write CMakeLists.txt\n filepath = moduleDir+'/CMakeLists.txt'\n \n if not op.isfile(filepath):\n o = open(filepath,'w')\n \n if op.isdir(moduleDir+'/src'):\n template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt')\n else:\n template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt')\n \n for line in open(template_cmakelist,'r'):\n line = line.replace('otb-template-module',cmakeModName)\n o.write(line);\n o.close()\n\n # write src/CMakeLists.txt\n # list of CXX files\n if op.isdir(moduleDir+'/src'):\n cxxFiles = glob.glob(moduleDir+'/src/*.cxx')\n cxxFileList='';\n for cxxf in cxxFiles:\n cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\\n'\n # build list of link dependencies\n linkLibs = \"\"\n for dep in dependencies[moduleName]:\n #verify if dep is a header-onlymodule\n depThirdParty = False\n try:\n moduleDic[dep]\n except KeyError:\n # this is a ThirdParty module\n depThirdParty = True\n \n if not depThirdParty:\n depModuleDir = op.join(op.join(HeadOfModularOTBTree,\"Modules\"),op.join(moduleDic[dep],dep))\n depcxx = glob.glob(depModuleDir+'/src/*.cxx')\n if depcxx :\n linkLibs = linkLibs + \" ${OTB\"+dep+\"_LIBRARIES}\" + \"\\n\"\n else:\n linkLibs = linkLibs + \" ${OTB\"+dep+\"_LIBRARIES}\" + \"\\n\"\n \n if len(linkLibs) == 0:\n linkLibs = \" ${OTBITK_LIBRARIES}\"\n filepath = moduleDir+'/src/CMakeLists.txt'\n if not op.isfile(filepath):\n o = open(filepath,'w')\n for line in 
open(op.join(scriptDir,'templateModule/otb-template-module/src/CMakeLists.txt'),'r'):\n line = line.replace('otb-template-module',cmakeModName)\n line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \\n\n line = line.replace('LINK_LIBRARIES_TO_BE_REPLACED',linkLibs)\n o.write(line);\n o.close()\n\n # write app/CMakeLists.txt\n if op.isdir(moduleDir+'/app'):\n os.mkdir(moduleDir+'/test')\n srcFiles = glob.glob(moduleDir+'/app/*.cxx')\n srcFiles += glob.glob(moduleDir+'/app/*.h')\n appList = {}\n \n for srcf in srcFiles:\n # get App name\n appName = analyseAppManifest.findApplicationName(srcf)\n if len(appName) == 0:\n continue\n \n appList[appName] = {\"source\":op.basename(srcf)}\n \n # get original location\n cmakeListPath = \"\"\n for item in sourceList:\n if op.basename(item[\"path\"]) == op.basename(srcf) and \\\n moduleName == item[\"module\"]:\n appDir = op.basename(op.dirname(item[\"path\"]))\n cmakeListPath = op.join(HeadOfOTBTree,op.join(\"Testing/Applications\"),op.join(appDir,\"CMakeLists.txt\"))\n break\n \n # get App tests\n if not op.isfile(cmakeListPath):\n continue\n \n appList[appName][\"test\"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName)\n \n # build list of link dependencies\n linkLibs = \"\"\n for dep in dependencies[moduleName]:\n linkLibs = linkLibs + \" ${OTB\"+dep+\"_LIBRARIES}\" + \"\\n\"\n \n filepath = moduleDir+'/app/CMakeLists.txt'\n if not op.isfile(filepath):\n o = open(filepath,'w')\n # define link libraries \n o.write(\"set(\"+cmakeModName+\"_LINK_LIBS\\n\")\n o.write(linkLibs)\n o.write(\")\\n\")\n \n for appli in appList:\n content = \"\\notb_create_application(\\n\"\n content += \" NAME \" + appli + \"\\n\"\n content += \" SOURCES \" + appList[appli][\"source\"] + \"\\n\"\n content += \" LINK_LIBRARIES ${${otb-module}_LIBRARIES})\\n\"\n o.write(content)\n o.close()\n \n filepath = moduleDir+'/test/CMakeLists.txt'\n if not op.isfile(filepath):\n o = open(filepath,'w')\n o.write(\"otb_module_test()\")\n for appli in appList:\n if not appList[appli].has_key(\"test\"):\n continue\n o.write(\"\\n#----------- \"+appli+\" TESTS ----------------\\n\")\n for test in appList[appli][\"test\"]:\n if test.count(\"${\"):\n print \"Warning : test name contains a variable : \"+test\n continue\n \n testcode=appList[appli][\"test\"][test]\n testcode=[s.replace('OTB_TEST_APPLICATION', 'otb_test_application') for s in testcode]\n o.writelines(testcode)\n o.write(\"\\n\")\n o.close()\n\n # write test/CMakeLists.txt : done by dispatchTests.py\n \"\"\"\n if op.isdir(moduleDir+'/test'):\n cxxFiles = glob.glob(moduleDir+'/test/*.cxx')\n cxxFileList='';\n for cxxf in cxxFiles:\n cxxFileList = cxxFileList+cxxf.split('/')[-1]+'\\n'\n filepath = moduleDir+'/test/CMakeLists.txt'\n if not op.isfile(filepath):\n o = open(filepath,'w')\n for line in open('./templateModule/otb-template-module/test/CMakeLists.txt','r'):\n # TODO : refactor for OTB\n words= moduleName.split('-')\n moduleNameMod='';\n for word in words:\n moduleNameMod=moduleNameMod + word.capitalize()\n line = line.replace('itkTemplateModule',moduleNameMod)\n line = line.replace('itk-template-module',moduleName)\n line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \\n\n o.write(line);\n o.close()\n \"\"\"\n \n # write otb-module.cmake, which contains dependency info\n filepath = moduleDir+'/otb-module.cmake'\n if not op.isfile(filepath):\n o = open(filepath,'w')\n for line in 
open(op.join(scriptDir,'templateModule/otb-template-module/otb-module.cmake'),'r'):\n # replace documentation\n if line.find(\"DESCRIPTION_TO_BE_REPLACED\") >= 0:\n docString = \"\\\"TBD\\\"\"\n if moduleName in modDescriptions:\n descPos = line.find(\"DESCRIPTION_TO_BE_REPLACED\")\n limitChar = 80\n docString = \"\\\"\"+modDescriptions[moduleName]+\"\\\"\"\n curPos = 80 - descPos\n while curPos < len(docString):\n lastSpace = docString[0:curPos].rfind(' ')\n if lastSpace > max(0,curPos-80):\n docString = docString[0:lastSpace] + '\\n' + docString[lastSpace+1:]\n else:\n docString = docString[0:curPos] + '\\n' + docString[curPos:]\n curPos += 81\n line = line.replace('DESCRIPTION_TO_BE_REPLACED',docString)\n \n # replace module name\n line = line.replace('otb-template-module',cmakeModName)\n # replace depend list\n dependTagPos = line.find(\"DEPENDS_TO_BE_REPLACED\")\n if dependTagPos >= 0:\n replacementStr = \"DEPENDS\"\n indentStr = \"\"\n for it in range(dependTagPos+2):\n indentStr = indentStr + \" \"\n if len(dependencies[moduleName]) > 0:\n deplist = dependencies[moduleName]\n else:\n deplist = [\"Common\"]\n for dep in deplist:\n replacementStr = replacementStr + \"\\n\" + indentStr +\"OTB\"+ dep\n line = line.replace('DEPENDS_TO_BE_REPLACED',replacementStr)\n # replace test_depend list\n testDependTagPos = line.find(\"TESTDEP_TO_BE_REPLACED\")\n if testDependTagPos >= 0:\n if moduleName.startswith(\"App\"):\n # for application : hardcode TestKernel and CommandLine\n indentStr = \"\"\n for it in range(testDependTagPos+2):\n indentStr = indentStr + \" \"\n replacementStr = \"TEST_DEPENDS\\n\" + indentStr + \"OTBTestKernel\\n\" + indentStr + \"OTBCommandLine\"\n line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr)\n else:\n # standard case\n\n if len(testDependencies[moduleName]) > 0:\n indentStr = \"\"\n replacementStr = \"TEST_DEPENDS\"\n for it in range(testDependTagPos+2):\n indentStr = indentStr + \" \"\n for dep in testDependencies[moduleName]:\n replacementStr = replacementStr + \"\\n\" + indentStr +\"OTB\"+ dep \n line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr)\n else:\n line = line.replace('TESTDEP_TO_BE_REPLACED','')\n \n # replace example_depend list\n exDependTagPos = line.find(\"EXDEP_TO_BE_REPLACED\")\n if exDependTagPos >= 0:\n if len(exDependencies[moduleName]) > 0:\n indentStr = \"\"\n replacementStr = \"EXAMPLE_DEPENDS\"\n for it in range(exDependTagPos+2):\n indentStr = indentStr + \" \"\n for dep in exDependencies[moduleName]:\n replacementStr = replacementStr + \"\\n\" + indentStr +\"OTB\"+ dep \n line = line.replace('EXDEP_TO_BE_REPLACED',replacementStr)\n else:\n line = line.replace('EXDEP_TO_BE_REPLACED','')\n o.write(line);\n \n o.close()\n\n# call dispatchTests to fill test/CMakeLists\nif op.isfile(testDependPath):\n dispatchTests.main([\"dispatchTests.py\",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,testDependPath])\n\n\"\"\"\n# call dispatchExamples to fill example/CMakeLists\nif op.isfile(exDependPath):\n dispatchExamples.main([\"dispatchExamples.py\",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,exDependPath])\n\"\"\"\n\n# examples\nfor i in sorted(os.listdir(HeadOfTempTree + \"/Examples\")):\n if i == \"CMakeLists.txt\" or i == \"README.txt\" or i.startswith(\"DataRepresentation\"):\n continue\n\n for j in sorted(os.listdir(HeadOfTempTree + \"/Examples/\" + i)):\n if j == \"CMakeLists.txt\" or j.startswith(\"otb\"):\n continue\n \n command = \"mv %s/Examples/%s/%s %s/Examples/%s/%s\" % ( HeadOfTempTree, i, j, 
HeadOfModularOTBTree, i, j)\n os.system(command)\n\nfor i in sorted(os.listdir(HeadOfTempTree + \"/Examples/DataRepresentation\")):\n if i == \"CMakeLists.txt\" or i == \"README.txt\":\n continue\n\n for j in sorted(os.listdir(HeadOfTempTree + \"/Examples/DataRepresentation/\" + i)):\n if j == \"CMakeLists.txt\" or j.startswith(\"otb\"):\n continue\n \n command = \"mv %s/Examples/DataRepresentation/%s/%s %s/Examples/DataRepresentation/%s/%s\" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) \n os.system(command)\n\n\n# save version without patches (so that we can regenerate patches later)\nos.system( \"cp -ar \" + op.join(OutputDir,\"OTB_Modular\") + \" \" + op.join(OutputDir,\"OTB_Modular-nopatch\") )\n\n# apply patches in OTB_Modular\ncurdir = op.abspath(op.dirname(__file__))\ncommand = \"cd \" + op.join(OutputDir,\"OTB_Modular\") + \" && patch -p1 < \" + curdir + \"/patches/otbmodular.patch\"\nprint \"Executing \" + command\nos.system( command )\n\n# remove Copyright files we don't want to touch later\nos.system( \"rm -rf %s\" % (op.join(HeadOfTempTree,\"Copyright\") ) )\nos.system( \"rm -rf %s\" % (op.join(HeadOfTempTree,\"RELEASE_NOTES.txt\") ) )\nos.system( \"rm -rf %s\" % (op.join(HeadOfTempTree,\"README\") ) )\n\n# PREPARE MIGRATION COMMIT ON A CLONE OF ORIGINAL CHECKOUT\nif enableMigration:\n print(\"Executing migration on a clone of original checkout\")\n HeadOfTempTree = op.abspath(HeadOfTempTree)\n OutputDir = op.abspath(OutputDir)\n \n # clone original checkout\n outputModular = op.join(OutputDir,\"OTB_Modular\")\n outputMigration = op.join(OutputDir,\"OTB_Migration\")\n if op.exists(outputMigration):\n os.removedirs(outputMigration)\n command = [\"cp\",\"-ar\",HeadOfOTBTree,outputMigration]\n call(command)\n os.chdir(outputMigration)\n \n # walk through OTB_Remaining and delete corresponding files in OTB checkout\n print(\"DELETE STEP...\")\n for dirPath, dirNames, fileNames in os.walk(HeadOfTempTree):\n currentSourceDir = dirPath.replace(HeadOfTempTree,'.')\n for fileName in fileNames:\n if op.exists(op.join(currentSourceDir,fileName)):\n command = [\"hg\",\"remove\",op.join(currentSourceDir,fileName)]\n call(command)\n else:\n print(\"Unknown file : \"+op.join(currentSourceDir,fileName))\n command = ['hg','commit','-m','ENH: Remove files not necessary after modularization']\n call(command)\n \n # walk through manifest and rename files\n print(\"MOVE STEP...\")\n for source in sourceList:\n outputPath = op.join(\"./Modules\",op.join(source[\"group\"],op.join(source[\"module\"],source[\"subDir\"])))\n command = ['hg','rename',source[\"path\"],op.join(outputPath,op.basename(source[\"path\"]))]\n call(command)\n command = ['hg','commit','-m','ENH: Move source and test files into their respective module']\n call(command)\n \n # add new files from OTB_Modular (files from OTB-Modular repo + generated files)\n print(\"ADD STEP...\")\n for dirPath, dirNames, fileNames in os.walk(outputModular):\n currentSourceDir = dirPath.replace(outputModular,'.')\n if currentSourceDir.startswith(\"./.hg\"):\n print(\"skip .hg\")\n continue\n for fileName in fileNames:\n # skip hg files\n if fileName.startswith(\".hg\"):\n continue\n targetFile = op.join(currentSourceDir,fileName)\n if not op.exists(targetFile):\n if not op.exists(currentSourceDir):\n command = [\"mkdir\",\"-p\",currentSourceDir]\n call(command)\n shutil.copy(op.join(dirPath,fileName),targetFile)\n command = ['hg','add']\n call(command)\n command = ['hg','commit','-m','ENH: Add new files for modular build system']\n 
call(command)\n \n # apply patches on OTB Checkout\n print(\"PATCH STEP...\")\n for dirPath, dirNames, fileNames in os.walk(outputModular):\n currentSourceDir = dirPath.replace(outputModular,'.')\n if currentSourceDir.startswith(\"./.hg\"):\n continue\n for fileName in fileNames:\n # skip hg files\n if fileName.startswith(\".hg\"):\n continue\n targetFile = op.join(currentSourceDir,fileName)\n if op.exists(targetFile):\n command = ['cp',op.join(dirPath,fileName),targetFile]\n call(command)\n command = ['hg','commit','-m','ENH: Apply patches necessary after modularization']\n call(command)\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pyForp
import pprint
pp = pprint.PrettyPrinter(indent=4)
def fib(n):
if n < 2:
return n
return fib(n-2) + fib(n-1)
forp = pyForp.pyForp()
forp.start()
print fib(2)
forp.stop()
pp.pprint(forp.dump())
|
normal
|
{
"blob_id": "80f9c4b7261a894aad2c738d976cfb8efc4d228c",
"index": 4784,
"step-1": "import pyForp\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\ndef fib(n):\n if n < 2:\n return n\n return fib(n-2) + fib(n-1)\n\nforp = pyForp.pyForp()\nforp.start()\nprint fib(2)\nforp.stop()\npp.pprint(forp.dump())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Blueprint, request, make_response
from flask_expects_json import expects_json
from server.validation.schemas import guest_calendar_schema
from tools.for_db.work_with_booking_info import add_booking_info_and_get_uuid
from tools.for_db.work_with_links import get_link
from tools.build_response import build_response
guest_calendar_post = Blueprint('guest_calendar_post', __name__)
@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])
@expects_json(guest_calendar_schema)
def booking(link_id):
request_body = request.get_json()
link = get_link(link_id)
if link is None:
return build_response('link id is invalid', 401)
admin_id = link.admin_id
try:
uuid = add_booking_info_and_get_uuid(request_body['start'],
request_body['end'], admin_id, request_body['guest_name'],
request_body['guest_email'], request_body['topic'] if 'topic' in
request_body else None)
request_body['uuid'] = uuid
except Exception:
return build_response('already booked or deleted', 409)
return make_response(request_body, 200)
|
normal
|
{
"blob_id": "75ef5dd2b82cf79819f18045559f9850c74bb55a",
"index": 5565,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n",
"step-3": "<mask token>\nguest_calendar_post = Blueprint('guest_calendar_post', __name__)\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n",
"step-4": "from flask import Blueprint, request, make_response\nfrom flask_expects_json import expects_json\nfrom server.validation.schemas import guest_calendar_schema\nfrom tools.for_db.work_with_booking_info import add_booking_info_and_get_uuid\nfrom tools.for_db.work_with_links import get_link\nfrom tools.build_response import build_response\nguest_calendar_post = Blueprint('guest_calendar_post', __name__)\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
' a test module '
__author__ = 'Aaron Jiang'
import sys
def test():
args = sys.argv
if len(args) == 1:
print('Hello World')
elif len(args) == 2:
print('Hello, %s!' % args[1])
else:
print('TOO MANY ARGUMENTS!')
if __name__ == '__main__':
test()
class Test():
count = 0
print('called ', count)
def __init__(self, name):
self.__name = name
Test.count += 1
t1 = Test('Aaron')
print(t1.count)
Test.count = 10
t2 = Test('Aaron2')
print(t2.count)
class Screen:
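    # Contrasts two storage conventions: width is kept in a single-underscore attribute, height in a name-mangled double-underscore one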
@property
def width(self):
return self._width
@width.setter
def width(self, width):
self._width = width
@property
def height(self):
return self.__height
@height.setter
def height(self, height):
self.__height = height
@property
def resolution(self):
return self._width * self.__height
sc = Screen()
sc.width = 1024
sc.height = 1
print(sc.resolution)
class Chain(object):
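    # __getattr__ fires for any missing attribute, so each attribute access returns a new Chain with that segment appended to the path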
def __init__(self, path=''):
self._path = path
def __getattr__(self, path):
return Chain('%s/%s' % (self._path, path))
def __str__(self):
return self._path
__repr__ = __str__
print(Chain('/nan').status.user.timeline.list)
|
normal
|
{
"blob_id": "ececcf40005054e26e21152bcb5e68a1bce33e88",
"index": 7947,
"step-1": "<mask token>\n\n\nclass Test:\n <mask token>\n print('called ', count)\n <mask token>\n\n\n<mask token>\n\n\nclass Screen:\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\n<mask token>\n\n\nclass Chain(object):\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n __repr__ = __str__\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test:\n count = 0\n print('called ', count)\n\n def __init__(self, name):\n self.__name = name\n Test.count += 1\n\n\n<mask token>\n\n\nclass Screen:\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\n<mask token>\n\n\nclass Chain(object):\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n __repr__ = __str__\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test():\n args = sys.argv\n if len(args) == 1:\n print('Hello World')\n elif len(args) == 2:\n print('Hello, %s!' % args[1])\n else:\n print('TOO MANY ARGUMENTS!')\n\n\n<mask token>\n\n\nclass Test:\n count = 0\n print('called ', count)\n\n def __init__(self, name):\n self.__name = name\n Test.count += 1\n\n\n<mask token>\n\n\nclass Screen:\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\n<mask token>\n\n\nclass Chain(object):\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n __repr__ = __str__\n\n\n<mask token>\n",
"step-4": "<mask token>\n__author__ = 'Aaron Jiang'\n<mask token>\n\n\ndef test():\n args = sys.argv\n if len(args) == 1:\n print('Hello World')\n elif len(args) == 2:\n print('Hello, %s!' % args[1])\n else:\n print('TOO MANY ARGUMENTS!')\n\n\nif __name__ == '__main__':\n test()\n\n\nclass Test:\n count = 0\n print('called ', count)\n\n def __init__(self, name):\n self.__name = name\n Test.count += 1\n\n\nt1 = Test('Aaron')\nprint(t1.count)\nTest.count = 10\nt2 = Test('Aaron2')\nprint(t2.count)\n\n\nclass Screen:\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\nsc = Screen()\nsc.width = 1024\nsc.height = 1\nprint(sc.resolution)\n\n\nclass Chain(object):\n\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n __repr__ = __str__\n\n\nprint(Chain('/nan').status.user.timeline.list)\n",
"step-5": "' a test module '\n\n__author__ = 'Aaron Jiang'\n\nimport sys\n\ndef test():\n args = sys.argv\n if len(args) == 1:\n print('Hello World')\n elif len(args) == 2:\n print('Hello, %s!' % args[1])\n else:\n print('TOO MANY ARGUMENTS!')\n\n\nif __name__ == '__main__':\n test()\n\n\nclass Test():\n count = 0\n print('called ', count)\n\n def __init__(self, name):\n self.__name = name\n Test.count += 1\n\n\nt1 = Test('Aaron')\nprint(t1.count)\n\nTest.count = 10\n\nt2 = Test('Aaron2')\nprint(t2.count)\n\n\nclass Screen:\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._width = width\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, height):\n self.__height = height\n\n @property\n def resolution(self):\n return self._width * self.__height\n\n\nsc = Screen()\nsc.width = 1024\nsc.height = 1\nprint(sc.resolution)\n\nclass Chain(object):\n def __init__(self, path=''):\n self._path = path\n\n def __getattr__(self, path):\n return Chain('%s/%s' % (self._path, path))\n\n def __str__(self):\n return self._path\n\n __repr__ = __str__\n\n\nprint(Chain('/nan').status.user.timeline.list)\n",
"step-ids": [
12,
14,
15,
17,
19
]
}
|
[
12,
14,
15,
17,
19
] |
# -*- coding: utf-8 -*-
# Enter your code here. Read input from STDIN. Print output to STDOUT
n= input()
vals= list(map(int,input().split()))
def median(values):
n=len(values)
values = sorted(values)
if n%2==1:
return values[(n+1)//2 - 1]
else:
return int(sum(values[int((n/2)-1):int((n/2)+1)])/2)
def quartiles(values):
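    # Q1/Q3 are the medians of the lower/upper halves; for odd n the middle element is excluded from both halves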
n=len(values)
values.sort()
Q2=median(values)
Q1=median(values[:int(n/2)])
#print ("values=",values)
if n%2==0:
Q3=median(values[int(n/2):])
else:
Q3=median(values[int(n/2+1):])
return Q1,Q2,Q3
Q1,Q2,Q3=quartiles(vals)
print(Q1)
print(Q2)
print(Q3)
|
normal
|
{
"blob_id": "9d6b5baa8462b2996e4518dd39b5bb1efde1fd9d",
"index": 894,
"step-1": "<mask token>\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\nprint(Q1)\nprint(Q2)\nprint(Q3)\n",
"step-4": "n = input()\nvals = list(map(int, input().split()))\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\nQ1, Q2, Q3 = quartiles(vals)\nprint(Q1)\nprint(Q2)\nprint(Q3)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# Enter your code here. Read input from STDIN. Print output to STDOUT\r\n\r\nn= input()\r\nvals= list(map(int,input().split()))\r\n\r\ndef median(values):\r\n n=len(values)\r\n values = sorted(values)\r\n if n%2==1:\r\n return values[(n+1)//2 - 1]\r\n else:\r\n return int(sum(values[int((n/2)-1):int((n/2)+1)])/2)\r\n \r\ndef quartiles(values):\r\n n=len(values)\r\n values.sort()\r\n Q2=median(values)\r\n Q1=median(values[:int(n/2)])\r\n #print (\"values=\",values)\r\n\r\n if n%2==0:\r\n Q3=median(values[int(n/2):]) \r\n\r\n else:\r\n Q3=median(values[int(n/2+1):])\r\n \r\n return Q1,Q2,Q3\r\n\r\nQ1,Q2,Q3=quartiles(vals)\r\n\r\nprint(Q1)\r\nprint(Q2)\r\nprint(Q3)\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#@@range_begin(list1) # ←ignore this line; it is only used to quote the code in the main text.
#File name: Chapter07/0703person.py
# __metaclass__ = type #← remove the leading "#" if you are using Python 2
class Person:
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def greet(self): # say a greeting
        print(f"Hello. I am {self.name}.")
#@@range_end(list1) # ←ignore this line; it is only used to quote the code in the main text.
#Run it
#@@range_begin(list2) # ←ignore this line; it is only used to quote the code in the main text.
foo = Person()
bar = Person()
foo.set_name('Luke Skywalker') # a main character of "Star Wars"
bar.set_name('Anakin Skywalker') # Luke's father
foo.greet() #←Hello. I am Luke Skywalker.
bar.greet() #←Hello. I am Anakin Skywalker.
#@@range_end(list2) # ←ignore this line; it is only used to quote the code in the main text.
#@@range_begin(list3) # ←ignore this line; it is only used to quote the code in the main text.
print(foo.name) #←Luke Skywalker
bar.name = 'Yoda'
bar.greet() #←Hello. I am Yoda.
#@@range_end(list3) # ←ignore this line; it is only used to quote the code in the main text.
|
normal
|
{
"blob_id": "321dc411b003949a6744216a13c59c70d919a675",
"index": 8402,
"step-1": "class Person:\n <mask token>\n\n def get_name(self):\n return self.name\n\n def greet(self):\n print(f'こんにちは。私は{self.name}です。')\n\n\n<mask token>\n",
"step-2": "class Person:\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def greet(self):\n print(f'こんにちは。私は{self.name}です。')\n\n\n<mask token>\n",
"step-3": "class Person:\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def greet(self):\n print(f'こんにちは。私は{self.name}です。')\n\n\n<mask token>\nfoo.set_name('ルーク・スカイウォーカー')\nbar.set_name('アナキン・スカイウォーカー')\nfoo.greet()\nbar.greet()\nprint(foo.name)\n<mask token>\nbar.greet()\n",
"step-4": "class Person:\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def greet(self):\n print(f'こんにちは。私は{self.name}です。')\n\n\nfoo = Person()\nbar = Person()\nfoo.set_name('ルーク・スカイウォーカー')\nbar.set_name('アナキン・スカイウォーカー')\nfoo.greet()\nbar.greet()\nprint(foo.name)\nbar.name = 'ヨーダ'\nbar.greet()\n",
"step-5": "#@@range_begin(list1) # ←この行は無視してください。本文に引用するためのものです。\n#ファイル名 Chapter07/0703person.py\n# __metaclass__ = type #← python 2を使っている場合は行頭の「#」を取る\nclass Person:\n def set_name(self, name):\n self.name = name\n def get_name(self):\n return self.name\n def greet(self): # あいさつをする\n print(f\"こんにちは。私は{self.name}です。\")\n#@@range_end(list1) # ←この行は無視してください。本文に引用するためのものです。\n\n#実行\n#@@range_begin(list2) # ←この行は無視してください。本文に引用するためのものです。\nfoo = Person()\nbar = Person()\nfoo.set_name('ルーク・スカイウォーカー') #『スター・ウォーズ』の主要登場人物\nbar.set_name('アナキン・スカイウォーカー') # ルークの父\nfoo.greet() #←こんにちは。私はルーク・スカイウォーカーです。\nbar.greet() #←こんにちは。私はアナキン・スカイウォーカーです。\n#@@range_end(list2) # ←この行は無視してください。本文に引用するためのものです。\n\n#@@range_begin(list3) # ←この行は無視してください。本文に引用するためのものです。\nprint(foo.name) #←ルーク・スカイウォーカー\nbar.name = 'ヨーダ'\nbar.greet() #←こんにちは。私はヨーダです。\n#@@range_end(list3) # ←この行は無視してください。本文に引用するためのものです。\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
commands/map.py
description:
	Adds a mapping entry to an existing migration file and writes it back
"""
from json import dumps
from .base_command import BaseCommand
class Map(BaseCommand):
def run(self):
from lib.models import Mapping
from lib.models import Migration
migration = Migration.load(self.options['MIGRATION_FILE'])
mapping = Mapping(self.options)
migration.mappings.append(mapping)
migration.write()
|
normal
|
{
"blob_id": "07783921da2fb4ae9452324f833b08b3f92ba294",
"index": 546,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Map(BaseCommand):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Map(BaseCommand):\n\n def run(self):\n from lib.models import Mapping\n from lib.models import Migration\n migration = Migration.load(self.options['MIGRATION_FILE'])\n mapping = Mapping(self.options)\n migration.mappings.append(mapping)\n migration.write()\n",
"step-4": "<mask token>\nfrom json import dumps\nfrom .base_command import BaseCommand\n\n\nclass Map(BaseCommand):\n\n def run(self):\n from lib.models import Mapping\n from lib.models import Migration\n migration = Migration.load(self.options['MIGRATION_FILE'])\n mapping = Mapping(self.options)\n migration.mappings.append(mapping)\n migration.write()\n",
"step-5": "\"\"\"\n\ncommands/map.py\n\ndescription:\n\tGenerates a blank configuration file in the current directory\n\n\"\"\"\n\nfrom json import dumps\nfrom .base_command import BaseCommand\n\nclass Map(BaseCommand):\n\tdef run(self):\n\t\tfrom lib.models import Mapping\n\t\tfrom lib.models import Migration\n\n\t\tmigration = Migration.load(self.options['MIGRATION_FILE'])\n\n\t\tmapping = Mapping(self.options)\n\n\t\tmigration.mappings.append(mapping)\n\n\t\tmigration.write()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
v1 = 3 + 4 * 2
print(v1)
v2 = (2 + 6) * 2
print(v2)
v3 = 2 ** 3 ** 2
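# exponentiation is right-associative, so 2 ** 3 ** 2 == 2 ** (3 ** 2) == 512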
print(v3)
v4 = 20 + 80 / 2
print(v4)
|
normal
|
{
"blob_id": "e6694403eecf2c4511c1fce959b5939f5f457bb8",
"index": 9384,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(v1)\n<mask token>\nprint(v2)\n<mask token>\nprint(v3)\n<mask token>\nprint(v4)\n",
"step-3": "v1 = 3 + 4 * 2\nprint(v1)\nv2 = (2 + 6) * 2\nprint(v2)\nv3 = 2 ** 3 ** 2\nprint(v3)\nv4 = 20 + 80 / 2\nprint(v4)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.core.management.base import BaseCommand, CommandError
from tasks.redisqueue import RedisQueue
from django.conf import settings
class Command(BaseCommand):
def handle(self, *args, **options):
rqueue = RedisQueue(settings.REDIS_URL)
rqueue.worker()
|
normal
|
{
"blob_id": "cccf6ec50ae00d8e00a1a53ea06fa8b6d061b72e",
"index": 8258,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n rqueue = RedisQueue(settings.REDIS_URL)\n rqueue.worker()\n",
"step-4": "from django.core.management.base import BaseCommand, CommandError\nfrom tasks.redisqueue import RedisQueue\nfrom django.conf import settings\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n rqueue = RedisQueue(settings.REDIS_URL)\n rqueue.worker()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from checkio.home.long_repeat import long_repeat
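# long_repeat is expected to return the length of the longest run of identical consecutive characters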
def test_long_repeat():
assert long_repeat("sdsffffse") == 4, "First"
assert long_repeat("ddvvrwwwrggg") == 3, "Second"
def test_fails_1():
assert long_repeat("") == 0, "Empty String"
def test_fails_2():
assert long_repeat("aa") == 2
|
normal
|
{
"blob_id": "b459919e779063247c176e127368c687c903cf0f",
"index": 7869,
"step-1": "<mask token>\n\n\ndef test_fails_1():\n assert long_repeat('') == 0, 'Empty String'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_fails_1():\n assert long_repeat('') == 0, 'Empty String'\n\n\ndef test_fails_2():\n assert long_repeat('aa') == 2\n",
"step-3": "<mask token>\n\n\ndef test_long_repeat():\n assert long_repeat('sdsffffse') == 4, 'First'\n assert long_repeat('ddvvrwwwrggg') == 3, 'Second'\n\n\ndef test_fails_1():\n assert long_repeat('') == 0, 'Empty String'\n\n\ndef test_fails_2():\n assert long_repeat('aa') == 2\n",
"step-4": "from checkio.home.long_repeat import long_repeat\n\n\ndef test_long_repeat():\n assert long_repeat('sdsffffse') == 4, 'First'\n assert long_repeat('ddvvrwwwrggg') == 3, 'Second'\n\n\ndef test_fails_1():\n assert long_repeat('') == 0, 'Empty String'\n\n\ndef test_fails_2():\n assert long_repeat('aa') == 2\n",
"step-5": "from checkio.home.long_repeat import long_repeat\n\n\ndef test_long_repeat():\n assert long_repeat(\"sdsffffse\") == 4, \"First\"\n assert long_repeat(\"ddvvrwwwrggg\") == 3, \"Second\"\n\n\ndef test_fails_1():\n assert long_repeat(\"\") == 0, \"Empty String\"\n\n\ndef test_fails_2():\n assert long_repeat(\"aa\") == 2\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from metainfo.views import DomainListView
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'metapull.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', DomainListView.as_view()),
url(r'^admin/', include(admin.site.urls)),
url(r'^domains/', include('metainfo.urls', namespace = 'domains')),
)
|
normal
|
{
"blob_id": "1599f5e49ec645b6d448e74719e240343077aedd",
"index": 5464,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('^$', DomainListView.as_view()), url(\n '^admin/', include(admin.site.urls)), url('^domains/', include(\n 'metainfo.urls', namespace='domains')))\n",
"step-3": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom metainfo.views import DomainListView\nurlpatterns = patterns('', url('^$', DomainListView.as_view()), url(\n '^admin/', include(admin.site.urls)), url('^domains/', include(\n 'metainfo.urls', namespace='domains')))\n",
"step-4": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom metainfo.views import DomainListView\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'metapull.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', DomainListView.as_view()),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^domains/', include('metainfo.urls', namespace = 'domains')),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Pose estimation and object detection: OpenCV DNN, ImageAI, YOLO, mpi, caffemodel, tensorflow
# Authors:
# Tutorial by: https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/
# Model file links collection (replace .sh script): Twenkid
# http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel
#https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt
# ImageAI: https://github.com/OlafenwaMoses/ImageAI
# # YOLOv3:
# yolo.h5
# https://github-releases.githubusercontent.com/125932201/1b8496e8-86fc-11e8-895f-fefe61ebb499?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210813%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210813T002422Z&X-Amz-Expires=300&X-Amz-Signature=02e6839be131d27b142baf50449d021339cbb334eed67a114ff9b960b8beb987&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo.h5&response-content-type=application%2Foctet-stream
# yolo-tiny.h5
# https://github-releases.githubusercontent.com/125932201/7cf559e6-86fa-11e8-81e8-1e959be261a8?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210812%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210812T232641Z&X-Amz-Expires=300&X-Amz-Signature=a5b91876c83b83a6aafba333c63c5f4a880bea9a937b30e52e92bbb0ac784018&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo-tiny.h5&response-content-type=application%2Foctet-stream
# Todor Arnaudov - Twenkid: debugging and merging; the LearnOpenCV Python code had a few misses, 13.8.2021
# It seems the pose model expects only one person, so the image must be segmented first (see pose1.jpg):
# detect people with YOLO or ImageAI etc., then run the pose DNN on each crop
# Specify the paths for the 2 files
# I tried with yolo-tiny, but the accuracy of the bounding boxes didn't seem acceptable.
#tf 1.15 for older versions of ImageAI - but tf doesn't support Py 3.8
#ImageAI: older versions require tf 1.x
#tf 2.4 - required by ImageAI 2.1.6 -- no GPU supported on Win 7, tf requires CUDA 11.0 (Win10). Win7: CUDA 10.x. CPU: works
# Set the paths to models, images etc.
# My experiment results: disappointingly bad pose estimation on the images I tested; sometimes good, sometimes terrible.
import cv2
import tensorflow.compat.v1 as tf
from imageai.Detection import ObjectDetection
import os
boxes = []
def yolo():
#name = "k.jpg"
root = "Z:\\"
name = "23367640.png" #t.jpg" #"p1.jpg" #"2w.jpg" #"grigor.jpg" #"2w.jpg" #"pose1.webp" #1.jpg"
execution_path = os.getcwd()
yolo_path = "Z:\\yolo.h5"
#yolo_path = "Z:\\yolo-tiny.h5"
localdir = False
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
#detector.setModelTypeAsTinyYOLOv3()
if localdir:
detector.setModelPath(os.path.join(execution_path , yolo_path))
else:
detector.setModelPath(yolo_path)
#dir(detector)
detector.loadModel()
#loaded_model = tf.keras.models.load_model("./src/mood-saved-models/"model + ".h5")
#loaded_model = tf.keras.models.load_model(detector.)
#path = "E:\capture_023_29092020_150305.jpg" #IMG_20200528_044908.jpg"
#pathOut = "E:\YOLO_capture_023_29092020_150305.jpg"
#path = "pose1.webp" #E:\\capture_046_29092020_150628.jpg"
pathOut = "yolo_out_2.jpg"
path = root + name
pathOut = root + name + "yolo_out" + ".jpg"
detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , path), output_image_path=os.path.join(execution_path , pathOut), minimum_percentage_probability=10) #30)
for eachObject in detections:
print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"] )
print("--------------------------------")
return detections, path
det,path = yolo()
yoloImage = cv2.imread(path) #crop regions from it
for i in det:
print(i)
protoFile = "Z:\\pose\\mpi\\pose_deploy_linevec_faster_4_stages.prototxt"
#protoFile = "pose_deploy_linevec_faster_4_stages.prototxt"
#weightsFile = "Z:\\pose\\mpi\\pose_iter_440000.caffemodel"
weightsFile = "Z:\\pose\\mpi\\pose_iter_160000.caffemodel"
#weightsFile = "pose_iter_160000.caffemodel"
#weightsFile = "pose_iter_440000.caffemodel"
# Read the network into Memory
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
"""
{'name': 'person', 'percentage_probability': 99.86668229103088, 'box_points': [18, 38, 153, 397]}
{'name': 'person', 'percentage_probability': 53.89136075973511, 'box_points': [386, 93, 428, 171]}
{'name': 'person', 'percentage_probability': 11.339860409498215, 'box_points': [585, 99, 641, 180]}
{'name': 'person', 'percentage_probability': 10.276197642087936, 'box_points': [126, 178, 164, 290]}
{'name': 'person', 'percentage_probability': 99.94878768920898, 'box_points': [293, 80, 394, 410]}
{'name': 'person', 'percentage_probability': 99.95986223220825, 'box_points': [478, 88, 589, 410]}
{'name': 'person', 'percentage_probability': 67.95878410339355, 'box_points': [1, 212, 39, 300]}
{'name': 'person', 'percentage_probability': 63.609880208969116, 'box_points': [153, 193, 192, 306]}
{'name': 'person', 'percentage_probability': 23.985233902931213, 'box_points': [226, 198, 265, 308]}
{'name': 'sports ball', 'percentage_probability': 20.820775628089905, 'box_points': [229, 50, 269, 94]}
{'name': 'person', 'percentage_probability': 40.28712213039398, 'box_points': [423, 110, 457, 160]}
H, W, Ch 407 211 3
"""
yolo_thr = 70 #in percents, not 0.7
collected = []
bWiden = False
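# Keep only confident 'person' detections and crop each one, since the pose network works best on a single person per image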
for d in det:
if (d['name'] == 'person') and d['percentage_probability'] > yolo_thr:
x1,y1,x2,y2 = d['box_points']
if bWiden:
x1-=20
x2+=20
y1-=30
y2+=30
cropped = yoloImage[y1:y2, x1:x2]
cv2.imshow(d['name']+str(x1), cropped)
collected.append(cropped) #or copy first?
cv2.waitKey()
#x1,y1, ...
# for i in collected: cv2.imshow("COLLECTED?", i); cv2.waitKey() #OK
# Read image
#frame = cv2.imread("Z:\\23367640.png") #1.jpg")
#src = "Z:\\2w.jpg" #z:\\pose1.webp" #nacep1.jpg"
#src = "z:\\pose1.webp"
srcs = ["z:\\pose1.webp","Z:\\2w.jpg", "Z:\\grigor.jpg"]
id = 2
#src = srcs[2]
src = path #from first yolo, in order to compare
frame = cv2.imread(src)
cv2.imshow("FRAME"+src, frame)
#frameWidth, frameHeight, _ = frame.shape
frameHeight, frameWidth, ch = frame.shape
print("H, W, Ch", frameHeight, frameWidth, ch)
# Specify the input image dimensions
inWidth = 368 #184 #368
inHeight = 368 #184 #368
# Prepare the frame to be fed to the network
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
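# blobFromImage scales pixel values by 1/255, resizes to inWidth x inHeight, subtracts a zero mean and keeps BGR channel order (swapRB=False)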
#cv2.imshow("G", inpBlob) #unsupported
#cv2.waitKey(0)
# Set the prepared object as the input blob of the network
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print("========")
H = output.shape[2]
W = output.shape[3]
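# output has shape (1, channels, H, W): one confidence heat-map per channel, the first 15 channels being the MPI body parts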
# Empty list to store the detected keypoints
points = []
threshold = 0.3
maxKeypoints = 44
Keypoints = output.shape[1]
print("Keypoints from output?", Keypoints)
Keypoints = 15 #MPI ... returns only 15
labels = ["Head", "Neck", "Right Shoulder", "Right Elbow", "Right Wrist", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Chest", "Background"]
#for i in range(len()):
for i in range(Keypoints): #?
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
if prob > threshold :
cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
cv2.putText(frame, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
# Add the point to the list if the probability is greater than the threshold
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else :
points.append(None)
print(points)
cv2.imshow("Output-Keypoints",frame)
def Detect(image): # inWidth, inHeight and net are globals for now; pass them as parameters later
frameHeight, frameWidth, ch = image.shape
# Prepare the image to be fed to the network
inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
#cv2.imshow("G", inpBlob) #unsupported
#cv2.waitKey(0)
# Set the prepared object as the input blob of the network
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print("========")
H = output.shape[2]
W = output.shape[3]
# Empty list to store the detected keypoints
points = []
threshold = 0.1
maxKeypoints = 44
Keypoints = output.shape[1]
print("Keypoints from output?", Keypoints)
Keypoints = 15 #MPI ... returns only 15
labels = ["Head", "Neck", "Right Shoulder", "Right Elbow", "Right Wrist", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Chest", "Background"]
#for i in range(len()):
for i in range(Keypoints): #?
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
if prob > threshold :
cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
cv2.putText(image, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
# Add the point to the list if the probability is greater than the threshold
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else :
points.append(None)
print(points)
cv2.imshow("Output-Keypoints",image)
cv2.waitKey()
for i in collected: Detect(i)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "c80ae9d2eb07fd716a80a5e2d7b5237925fda02c",
"index": 5861,
"step-1": "<mask token>\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\n<mask token>\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\n<mask token>\nfor i in det:\n print(i)\n<mask token>\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\n<mask token>\ncv2.imshow('FRAME' + src, frame)\n<mask token>\nprint('H, W, Ch', frameHeight, frameWidth, ch)\n<mask token>\nnet.setInput(inpBlob)\nprint(inpBlob)\n<mask token>\nprint(output)\nprint('========')\n<mask token>\nprint('Keypoints from output?', Keypoints)\n<mask token>\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nboxes = []\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\ndet, path = yolo()\nyoloImage = cv2.imread(path)\nfor i in det:\n print(i)\nprotoFile = 'Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt'\nweightsFile = 'Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel'\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n<mask token>\nyolo_thr = 70\ncollected = []\nbWiden = False\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\nsrcs = ['z:\\\\pose1.webp', 'Z:\\\\2w.jpg', 'Z:\\\\grigor.jpg']\nid = 2\nsrc = path\nframe = cv2.imread(src)\ncv2.imshow('FRAME' + src, frame)\nframeHeight, frameWidth, ch = frame.shape\nprint('H, W, Ch', frameHeight, frameWidth, ch)\ninWidth = 368\ninHeight = 368\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, \n 0, 0), swapRB=False, crop=False)\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\nprint(output)\nprint('========')\nH = output.shape[2]\nW = output.shape[3]\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint('Keypoints from output?', Keypoints)\nKeypoints = 15\nlabels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',\n 'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',\n 'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle', 'Chest', 'Background'\n ]\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left 
Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport tensorflow.compat.v1 as tf\nfrom imageai.Detection import ObjectDetection\nimport os\nboxes = []\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\ndet, path = yolo()\nyoloImage = cv2.imread(path)\nfor i in det:\n print(i)\nprotoFile = 'Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt'\nweightsFile = 'Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel'\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n<mask token>\nyolo_thr = 70\ncollected = []\nbWiden = False\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\nsrcs = ['z:\\\\pose1.webp', 'Z:\\\\2w.jpg', 'Z:\\\\grigor.jpg']\nid = 2\nsrc = path\nframe = cv2.imread(src)\ncv2.imshow('FRAME' + src, frame)\nframeHeight, frameWidth, ch = frame.shape\nprint('H, W, Ch', frameHeight, frameWidth, ch)\ninWidth = 368\ninHeight = 368\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, \n 0, 0), swapRB=False, crop=False)\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\nprint(output)\nprint('========')\nH = output.shape[2]\nW = output.shape[3]\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint('Keypoints from output?', Keypoints)\nKeypoints = 15\nlabels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',\n 'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',\n 'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle', 'Chest', 'Background'\n ]\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n 
labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "# Pose estimation and object detection: OpenCV DNN, ImageAI, YOLO, mpi, caffemodel, tensorflow\n# Authors:\n# Tutorial by: https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/\n# Model file links collection (replace .sh script): Twenkid\n# http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel\n#https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt\n# ImageAI: https://github.com/OlafenwaMoses/ImageAI\n# # YOLOv3:\n# yolo.h5\n# https://github-releases.githubusercontent.com/125932201/1b8496e8-86fc-11e8-895f-fefe61ebb499?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210813%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210813T002422Z&X-Amz-Expires=300&X-Amz-Signature=02e6839be131d27b142baf50449d021339cbb334eed67a114ff9b960b8beb987&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo.h5&response-content-type=application%2Foctet-stream\n# yolo-tiny.h5\n# https://github-releases.githubusercontent.com/125932201/7cf559e6-86fa-11e8-81e8-1e959be261a8?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210812%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210812T232641Z&X-Amz-Expires=300&X-Amz-Signature=a5b91876c83b83a6aafba333c63c5f4a880bea9a937b30e52e92bbb0ac784018&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo-tiny.h5&response-content-type=application%2Foctet-stream\n# Todor Arnaudov - Twenkid: debug and merging, LearnOpenCV python code had a few misses, 13.8.2021\n# It seems the pose model expects only one person so the image must be segmented first! pose1.jpg\n# Detect with YOLO or ImageAI etc. then use DNN\n# Specify the paths for the 2 files\n# I tried with yolo-tiny, but the accuracy of the bounding boxes didn't seem acceptable.\n#tf 1.15 for older versions of ImageAI - but tf doesn't support Py 3.8\n#ImageAI: older versions require tf 1.x\n#tf 2.4 - required by ImageAI 2.1.6 -- no GPU supported on Win 7, tf requires CUDA 11.0 (Win10). Win7: CUDA 10.x. CPU: works\n# Set the paths to models, images etc.\n# My experiments results: disappointingly bad pose estimation on the images I tested. Sometimes good, sometimes terrible. 
\n\nimport cv2\nimport tensorflow.compat.v1 as tf\nfrom imageai.Detection import ObjectDetection\nimport os\nboxes = []\n\ndef yolo():\n #name = \"k.jpg\"\n root = \"Z:\\\\\"\n name = \"23367640.png\" #t.jpg\" #\"p1.jpg\" #\"2w.jpg\" #\"grigor.jpg\" #\"2w.jpg\" #\"pose1.webp\" #1.jpg\"\n execution_path = os.getcwd()\n yolo_path = \"Z:\\\\yolo.h5\"\n #yolo_path = \"Z:\\\\yolo-tiny.h5\"\n localdir = False\n\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n #detector.setModelTypeAsTinyYOLOv3()\n \n if localdir:\n detector.setModelPath(os.path.join(execution_path , yolo_path))\n else: \n detector.setModelPath(yolo_path)\n\n #dir(detector)\n detector.loadModel()\n #loaded_model = tf.keras.models.load_model(\"./src/mood-saved-models/\"model + \".h5\")\n #loaded_model = tf.keras.models.load_model(detector.)\n\n #path = \"E:\\capture_023_29092020_150305.jpg\" #IMG_20200528_044908.jpg\"\n #pathOut = \"E:\\YOLO_capture_023_29092020_150305.jpg\"\n\n #path = \"pose1.webp\" #E:\\\\capture_046_29092020_150628.jpg\"\n pathOut = \"yolo_out_2.jpg\"\n\n\n \n path = root + name\n pathOut = root + name + \"yolo_out\" + \".jpg\"\n\n detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , path), output_image_path=os.path.join(execution_path , pathOut), minimum_percentage_probability=10) #30)\n\n for eachObject in detections:\n print(eachObject[\"name\"] , \" : \", eachObject[\"percentage_probability\"], \" : \", eachObject[\"box_points\"] )\n print(\"--------------------------------\")\n return detections, path\n\ndet,path = yolo()\nyoloImage = cv2.imread(path) #crop regions from it \nfor i in det:\n print(i)\n \n\nprotoFile = \"Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt\"\n#protoFile = \"pose_deploy_linevec_faster_4_stages.prototxt\"\n#weightsFile = \"Z:\\\\pose\\\\mpi\\\\pose_iter_440000.caffemodel\"\nweightsFile = \"Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel\"\n#weightsFile = \"pose_iter_160000.caffemodel\"\n#weightsFile = \"pose_iter_440000.caffemodel\"\n\n# Read the network into Memory\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n\n\"\"\"\n{'name': 'person', 'percentage_probability': 99.86668229103088, 'box_points': [1\n8, 38, 153, 397]}\n{'name': 'person', 'percentage_probability': 53.89136075973511, 'box_points': [3\n86, 93, 428, 171]}\n{'name': 'person', 'percentage_probability': 11.339860409498215, 'box_points': [\n585, 99, 641, 180]}\n{'name': 'person', 'percentage_probability': 10.276197642087936, 'box_points': [\n126, 178, 164, 290]}\n{'name': 'person', 'percentage_probability': 99.94878768920898, 'box_points': [2\n93, 80, 394, 410]}\n{'name': 'person', 'percentage_probability': 99.95986223220825, 'box_points': [4\n78, 88, 589, 410]}\n{'name': 'person', 'percentage_probability': 67.95878410339355, 'box_points': [1\n, 212, 39, 300]}\n{'name': 'person', 'percentage_probability': 63.609880208969116, 'box_points': [\n153, 193, 192, 306]}\n{'name': 'person', 'percentage_probability': 23.985233902931213, 'box_points': [\n226, 198, 265, 308]}\n{'name': 'sports ball', 'percentage_probability': 20.820775628089905, 'box_point\ns': [229, 50, 269, 94]}\n{'name': 'person', 'percentage_probability': 40.28712213039398, 'box_points': [4\n23, 110, 457, 160]}\nH, W, Ch 407 211 3\n\"\"\"\nyolo_thr = 70 #in percents, not 0.7\ncollected = []\nbWiden = False\nfor d in det:\n if (d['name'] == 'person') and d['percentage_probability'] > yolo_thr:\n x1,y1,x2,y2 = d['box_points']\n if bWiden:\n x1-=20\n x2+=20\n y1-=30\n y2+=30\n cropped 
= yoloImage[y1:y2, x1:x2] \n cv2.imshow(d['name']+str(x1), cropped)\n collected.append(cropped) #or copy first?\n cv2.waitKey()\n #x1,y1, ...\n\n# for i in collected: cv2.imshow(\"COLLECTED?\", i); cv2.waitKey() #OK\n \n# Read image\n#frame = cv2.imread(\"Z:\\\\23367640.png\") #1.jpg\")\n#src = \"Z:\\\\2w.jpg\" #z:\\\\pose1.webp\" #nacep1.jpg\"\n#src = \"z:\\\\pose1.webp\" \nsrcs = [\"z:\\\\pose1.webp\",\"Z:\\\\2w.jpg\", \"Z:\\\\grigor.jpg\"]\nid = 2\n#src = srcs[2] \nsrc = path #from first yolo, in order to compare\n\nframe = cv2.imread(src)\ncv2.imshow(\"FRAME\"+src, frame)\n#frameWidth, frameHeight, _ = frame.shape\nframeHeight, frameWidth, ch = frame.shape\nprint(\"H, W, Ch\", frameHeight, frameWidth, ch)\n \n# Specify the input image dimensions\ninWidth = 368 #184 #368\ninHeight = 368 #184 #368\n\n# Prepare the frame to be fed to the network\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)\n\n#cv2.imshow(\"G\", inpBlob) #unsupported\n#cv2.waitKey(0)\n\n# Set the prepared object as the input blob of the network\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\n\nprint(output)\n\nprint(\"========\")\n\nH = output.shape[2]\nW = output.shape[3]\n# Empty list to store the detected keypoints\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint(\"Keypoints from output?\", Keypoints)\nKeypoints = 15 #MPI ... returns only 15\n\nlabels = [\"Head\", \"Neck\", \"Right Shoulder\", \"Right Elbow\", \"Right Wrist\", \"Left Shoulder\", \"Left Elbow\", \"Left Wrist\", \"Right Hip\", \"Right Knee\", \"Right Ankle\", \"Left Hip\", \"Left Knee\", \"Left Ankle\", \"Chest\", \"Background\"]\n\n#for i in range(len()):\nfor i in range(Keypoints): #?\n # confidence map of corresponding body's part.\n probMap = output[0, i, :, :]\n\n # Find global maxima of the probMap.\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n\n # Scale the point to fit on the original image\n x = (frameWidth * point[0]) / W\n y = (frameHeight * point[1]) / H\n\n if prob > threshold :\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.putText(frame, \"{}\".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n # Add the point to the list if the probability is greater than the threshold\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else :\n points.append(None)\n\nprint(points)\n\ncv2.imshow(\"Output-Keypoints\",frame)\n\ndef Detect(image): #inWidth, Height ... - global, set as params later \n frameHeight, frameWidth, ch = image.shape\n # Prepare the image to be fed to the network\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)\n\n #cv2.imshow(\"G\", inpBlob) #unsupported\n #cv2.waitKey(0)\n\n # Set the prepared object as the input blob of the network\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n\n print(output)\n\n print(\"========\")\n\n H = output.shape[2]\n W = output.shape[3]\n # Empty list to store the detected keypoints\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print(\"Keypoints from output?\", Keypoints)\n Keypoints = 15 #MPI ... 
returns only 15\n\n labels = [\"Head\", \"Neck\", \"Right Shoulder\", \"Right Elbow\", \"Right Wrist\", \"Left Shoulder\", \"Left Elbow\", \"Left Wrist\", \"Right Hip\", \"Right Knee\", \"Right Ankle\", \"Left Hip\", \"Left Knee\", \"Left Ankle\", \"Chest\", \"Background\"]\n\n #for i in range(len()):\n for i in range(Keypoints): #?\n # confidence map of corresponding body's part.\n probMap = output[0, i, :, :]\n\n # Find global maxima of the probMap.\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n\n # Scale the point to fit on the original image\n x = (frameWidth * point[0]) / W\n y = (frameHeight * point[1]) / H\n\n if prob > threshold :\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.putText(image, \"{}\".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n # Add the point to the list if the probability is greater than the threshold\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else :\n points.append(None)\n\n print(points)\n cv2.imshow(\"Output-Keypoints\",image)\n cv2.waitKey()\n\nfor i in collected: Detect(i)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import random
import torch
import numpy as np
from torch.autograd import Variable
class SupportSetManager(object):
FIXED_FIRST = 0
RANDOM = 1
def __init__(self, datasets, config, sample_per_class):
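        # Group each task's training example texts by label so that class prototypes can be drawn from them later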
self.config = config
(TEXT, LABEL, train, dev, test) = datasets[0]
self.TEXT = TEXT
self.sample_per_class = sample_per_class
print('Picking up prototypes')
self.prototype_text_list = []
for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):
prototype_text = []
#print taskid, LABEL.vocab
if not hasattr(LABEL, 'vocab'):
self.prototype_text_list.append(prototype_text)
continue
for lab_id in range(len(LABEL.vocab.itos)):
prototype_text.append([])
for example in train.examples:
lab_id = LABEL.vocab.stoi[example.label]
if prototype_text[lab_id] is not None:
prototype_text[lab_id].append(example.text)
else:
prototype_text[lab_id] = [example.text]
for lab_id in range(len(LABEL.vocab.itos)):
if len(prototype_text[lab_id]) == 0:
prototype_text[lab_id].append(['<pad>'])
if self.sample_per_class >= 1 and self.sample_per_class < len(prototype_text[lab_id]):
prototype_text[lab_id] = prototype_text[lab_id][:self.sample_per_class]
            print('Task %s: picked up %s prototypes' % (taskid, self.sample_per_class))
self.prototype_text_list.append(prototype_text)
def select_support_set(self, taskid, policy):
if policy == self.FIXED_FIRST:
supp_set = self.select_support_set_first(taskid)
elif policy == self.RANDOM:
supp_set = self.select_support_set_random(taskid)
return supp_set
def select_support_set_first(self, taskid):
prototype_text = self.prototype_text_list[taskid]
examples_text = []
for lab_id in range(len(prototype_text)):
examples_text.append(prototype_text[lab_id][0])
prototype_matrix = self.TEXT.numericalize(
self.TEXT.pad(x for x in examples_text),
device=self.config.device)
#if taskid == 0: #TODO test the consistency of the first example
# print examples_text
# print prototype_matrix
return prototype_matrix
def select_support_set_random(self, taskid, ):
prototype_text = self.prototype_text_list[taskid]
examples_text = []
for lab_id in range(len(prototype_text)):
rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)
examples_text.append(prototype_text[lab_id][rand_idx])
prototype_matrix = self.TEXT.numericalize(
self.TEXT.pad(x for x in examples_text),
device=self.config.device)
#if taskid == 0: #TODO test the consistency of the first example
# print examples_text
# print prototype_matrix
return prototype_matrix
def get_average_as_support(self, taskid, mnet_model):
prototype_text = self.prototype_text_list[taskid]
prototype_emb_list = []
for lab_id in range(len(prototype_text)):
prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in prototype_text[lab_id]),
device=self.config.device)
prototype_matrix = mnet_model.get_hidden(prototype_sent)
prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))
#print prototype_emb_list
#return torch.cat(prototype_emb_list, dim=0) #works for the new pytorch version
return torch.cat(prototype_emb_list, 0)
def get_average_and_std_as_support(self, taskid, mnet_model):
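        # Note: the sqrt below is commented out, so the returned "std" values are variances summed over dimensions, not standard deviations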
prototype_text = self.prototype_text_list[taskid]
prototype_emb_list = []
prototype_std_list = []
for lab_id in range(len(prototype_text)):
N = len(prototype_text[lab_id])
prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in prototype_text[lab_id]),
device=self.config.device, train=True)
prototype_matrix = mnet_model.get_hidden(prototype_sent)
mean_vec = torch.mean(prototype_matrix, dim=0)
if N > 1:
#std_val = torch.sqrt((torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1))
std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1)
std_val = Variable(std_val.data)
else:
std_val = Variable(torch.from_numpy(np.array([1.0]).astype(np.float32))).cuda()
prototype_emb_list.append(mean_vec)
prototype_std_list.append(std_val)
#print prototype_std_list
return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list, 0)
def get_average_as_support_sample(self, taskid, mnet_model, sample_per_class):
prototype_text = self.prototype_text_list[taskid]
prototype_emb_list = []
for lab_id in range(len(prototype_text)):
if sample_per_class > len(prototype_text[lab_id]):
prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in prototype_text[lab_id]),
device=self.config.device)
else:
                # materialize the range as a list so random.shuffle works under Python 3
                top_ind = list(range(len(prototype_text[lab_id])))
                random.shuffle(top_ind)
                top_ind = top_ind[:sample_per_class]
prototype_text_sample = [prototype_text[lab_id][i] for i in top_ind]
prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in prototype_text_sample),
device=self.config.device)
prototype_matrix = mnet_model.get_hidden(prototype_sent)
prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))
return torch.cat(prototype_emb_list, 0)
def get_average_as_support_large(self, taskid, mnet_model, batchsize):
prototype_text = self.prototype_text_list[taskid]
prototype_emb_list = []
for lab_id in range(len(prototype_text)):
            # use floor division so range(num_batch) gets an int in Python 3,
            # and add one extra batch to cover a trailing partial batch
            num_batch = len(prototype_text[lab_id]) // batchsize
            if len(prototype_text[lab_id]) % batchsize != 0:
                num_batch += 1
lab_emb_sum = []
for i in range(num_batch):
#print i
#print len(prototype_text[lab_id]), i*batchsize, (i+1) * batchsize
batch_text = prototype_text[lab_id][i * batchsize : min((i+1) * batchsize, len(prototype_text[lab_id]))]
#print batch_text
len_text = len(batch_text)
#print len_text
batch_prototype_sent = self.TEXT.numericalize(
self.TEXT.pad(x for x in batch_text),
device=self.config.device, train=True)
#print batch_prototype_sent
prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)
prototype_matrix = Variable(prototype_matrix.data)
#prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))
#prototype_emb_list.append(torch.sum(prototype_matrix, dim=0) / len_text)
#break
#TODO: the following three lines not equivalent to the two lines below
# lab_emb_sum.append(torch.sum(prototype_matrix, dim=0))
#lab_emb_sum = torch.sum( torch.cat(lab_emb_sum, 0), dim=0 )
#lab_emb_sum /= len(prototype_text[lab_id])
lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))
lab_emb_sum = torch.mean( torch.cat(lab_emb_sum, 0), dim=0 )
prototype_emb_list.append(lab_emb_sum)
return torch.cat(prototype_emb_list, 0)
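# Hedged usage sketch (added for illustration; not part of the original module). It assumes
# `datasets` is the list of (TEXT, LABEL, train, dev, test) torchtext tuples, `config.device`
# is set, and `mnet_model` exposes get_hidden(), exactly as the methods above expect.
# ssm = SupportSetManager(datasets, config, sample_per_class=5)
# supp = ssm.select_support_set(taskid=0, policy=SupportSetManager.RANDOM)
# prototypes = ssm.get_average_as_support(taskid=0, mnet_model=mnet_model)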
|
normal
|
{
"blob_id": "13a2814e8744c6c09906d790185ed44fc2b3f23e",
"index": 3642,
"step-1": "<mask token>\n\n\nclass SupportSetManager(object):\n <mask token>\n <mask token>\n\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n TEXT, LABEL, train, dev, test = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n print('Picking up prototypes')\n self.prototype_text_list = []\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n if self.sample_per_class >= 1 and self.sample_per_class < len(\n prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.\n sample_per_class]\n print('Task %s: picked up %s prototypes', (taskid, self.\n sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n <mask token>\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device, train=True)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch\n .pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(\n np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list,\n 0)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SupportSetManager(object):\n <mask token>\n <mask token>\n\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n TEXT, LABEL, train, dev, test = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n print('Picking up prototypes')\n self.prototype_text_list = []\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n if self.sample_per_class >= 1 and self.sample_per_class < len(\n prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.\n sample_per_class]\n print('Task %s: picked up %s prototypes', (taskid, self.\n sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n <mask token>\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device, train=True)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch\n .pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(\n np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list,\n 0)\n\n def get_average_as_support_sample(self, taskid, mnet_model,\n sample_per_class):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n if sample_per_class > len(prototype_text[lab_id]):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text[lab_id]), 
device=self.config.device)\n else:\n top_ind = range(len(prototype_text[lab_id]))\n random.shuffle(top_ind)\n top_ind = top_ind[:sample_per_class]\n prototype_text_sample = [prototype_text[lab_id][i] for i in\n top_ind]\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text_sample), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_as_support_large(self, taskid, mnet_model, batchsize):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n num_batch = len(prototype_text[lab_id]) / batchsize\n if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:\n num_batch += 1\n lab_emb_sum = []\n for i in range(num_batch):\n batch_text = prototype_text[lab_id][i * batchsize:min((i + \n 1) * batchsize, len(prototype_text[lab_id]))]\n len_text = len(batch_text)\n batch_prototype_sent = self.TEXT.numericalize(self.TEXT.pad\n (x for x in batch_text), device=self.config.device,\n train=True)\n prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)\n prototype_matrix = Variable(prototype_matrix.data)\n lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))\n lab_emb_sum = torch.mean(torch.cat(lab_emb_sum, 0), dim=0)\n prototype_emb_list.append(lab_emb_sum)\n return torch.cat(prototype_emb_list, 0)\n",
"step-3": "<mask token>\n\n\nclass SupportSetManager(object):\n <mask token>\n <mask token>\n\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n TEXT, LABEL, train, dev, test = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n print('Picking up prototypes')\n self.prototype_text_list = []\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n if self.sample_per_class >= 1 and self.sample_per_class < len(\n prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.\n sample_per_class]\n print('Task %s: picked up %s prototypes', (taskid, self.\n sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n\n def select_support_set_random(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)\n examples_text.append(prototype_text[lab_id][rand_idx])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device, train=True)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch\n .pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(\n np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n return 
torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list,\n 0)\n\n def get_average_as_support_sample(self, taskid, mnet_model,\n sample_per_class):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n if sample_per_class > len(prototype_text[lab_id]):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text[lab_id]), device=self.config.device)\n else:\n top_ind = range(len(prototype_text[lab_id]))\n random.shuffle(top_ind)\n top_ind = top_ind[:sample_per_class]\n prototype_text_sample = [prototype_text[lab_id][i] for i in\n top_ind]\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text_sample), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_as_support_large(self, taskid, mnet_model, batchsize):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n num_batch = len(prototype_text[lab_id]) / batchsize\n if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:\n num_batch += 1\n lab_emb_sum = []\n for i in range(num_batch):\n batch_text = prototype_text[lab_id][i * batchsize:min((i + \n 1) * batchsize, len(prototype_text[lab_id]))]\n len_text = len(batch_text)\n batch_prototype_sent = self.TEXT.numericalize(self.TEXT.pad\n (x for x in batch_text), device=self.config.device,\n train=True)\n prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)\n prototype_matrix = Variable(prototype_matrix.data)\n lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))\n lab_emb_sum = torch.mean(torch.cat(lab_emb_sum, 0), dim=0)\n prototype_emb_list.append(lab_emb_sum)\n return torch.cat(prototype_emb_list, 0)\n",
"step-4": "<mask token>\n\n\nclass SupportSetManager(object):\n FIXED_FIRST = 0\n RANDOM = 1\n\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n TEXT, LABEL, train, dev, test = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n print('Picking up prototypes')\n self.prototype_text_list = []\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n if self.sample_per_class >= 1 and self.sample_per_class < len(\n prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.\n sample_per_class]\n print('Task %s: picked up %s prototypes', (taskid, self.\n sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n\n def select_support_set_random(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n examples_text = []\n for lab_id in range(len(prototype_text)):\n rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)\n examples_text.append(prototype_text[lab_id][rand_idx])\n prototype_matrix = self.TEXT.numericalize(self.TEXT.pad(x for x in\n examples_text), device=self.config.device)\n return prototype_matrix\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for x in\n prototype_text[lab_id]), device=self.config.device, train=True)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch\n .pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(\n np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n return 
torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list,\n 0)\n\n def get_average_as_support_sample(self, taskid, mnet_model,\n sample_per_class):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n if sample_per_class > len(prototype_text[lab_id]):\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text[lab_id]), device=self.config.device)\n else:\n top_ind = range(len(prototype_text[lab_id]))\n random.shuffle(top_ind)\n top_ind = top_ind[:sample_per_class]\n prototype_text_sample = [prototype_text[lab_id][i] for i in\n top_ind]\n prototype_sent = self.TEXT.numericalize(self.TEXT.pad(x for\n x in prototype_text_sample), device=self.config.device)\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_as_support_large(self, taskid, mnet_model, batchsize):\n prototype_text = self.prototype_text_list[taskid]\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n num_batch = len(prototype_text[lab_id]) / batchsize\n if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:\n num_batch += 1\n lab_emb_sum = []\n for i in range(num_batch):\n batch_text = prototype_text[lab_id][i * batchsize:min((i + \n 1) * batchsize, len(prototype_text[lab_id]))]\n len_text = len(batch_text)\n batch_prototype_sent = self.TEXT.numericalize(self.TEXT.pad\n (x for x in batch_text), device=self.config.device,\n train=True)\n prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)\n prototype_matrix = Variable(prototype_matrix.data)\n lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))\n lab_emb_sum = torch.mean(torch.cat(lab_emb_sum, 0), dim=0)\n prototype_emb_list.append(lab_emb_sum)\n return torch.cat(prototype_emb_list, 0)\n",
"step-5": "import random\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\n\nclass SupportSetManager(object):\n FIXED_FIRST = 0\n RANDOM = 1\n def __init__(self, datasets, config, sample_per_class):\n self.config = config\n (TEXT, LABEL, train, dev, test) = datasets[0]\n self.TEXT = TEXT\n self.sample_per_class = sample_per_class\n\n print('Picking up prototypes')\n self.prototype_text_list = []\n\n for taskid, (TEXT, LABEL, train, dev, test) in enumerate(datasets):\n prototype_text = []\n #print taskid, LABEL.vocab\n if not hasattr(LABEL, 'vocab'):\n self.prototype_text_list.append(prototype_text)\n continue\n for lab_id in range(len(LABEL.vocab.itos)):\n prototype_text.append([])\n for example in train.examples:\n lab_id = LABEL.vocab.stoi[example.label]\n if prototype_text[lab_id] is not None:\n prototype_text[lab_id].append(example.text)\n else:\n prototype_text[lab_id] = [example.text]\n\n for lab_id in range(len(LABEL.vocab.itos)):\n if len(prototype_text[lab_id]) == 0:\n prototype_text[lab_id].append(['<pad>'])\n\n if self.sample_per_class >= 1 and self.sample_per_class < len(prototype_text[lab_id]):\n prototype_text[lab_id] = prototype_text[lab_id][:self.sample_per_class]\n\n print('Task %s: picked up %s prototypes', (taskid, self.sample_per_class))\n self.prototype_text_list.append(prototype_text)\n\n def select_support_set(self, taskid, policy):\n if policy == self.FIXED_FIRST:\n supp_set = self.select_support_set_first(taskid)\n elif policy == self.RANDOM:\n supp_set = self.select_support_set_random(taskid)\n return supp_set\n\n def select_support_set_first(self, taskid):\n prototype_text = self.prototype_text_list[taskid]\n\n examples_text = []\n for lab_id in range(len(prototype_text)):\n examples_text.append(prototype_text[lab_id][0])\n\n prototype_matrix = self.TEXT.numericalize(\n self.TEXT.pad(x for x in examples_text),\n device=self.config.device)\n #if taskid == 0: #TODO test the consistency of the first example\n # print examples_text\n # print prototype_matrix\n\n return prototype_matrix\n\n def select_support_set_random(self, taskid, ):\n prototype_text = self.prototype_text_list[taskid]\n\n examples_text = []\n for lab_id in range(len(prototype_text)):\n rand_idx = random.randint(0, len(prototype_text[lab_id]) - 1)\n examples_text.append(prototype_text[lab_id][rand_idx])\n\n prototype_matrix = self.TEXT.numericalize(\n self.TEXT.pad(x for x in examples_text),\n device=self.config.device)\n #if taskid == 0: #TODO test the consistency of the first example\n # print examples_text\n # print prototype_matrix\n\n return prototype_matrix\n\n def get_average_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in prototype_text[lab_id]),\n device=self.config.device)\n\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n #print prototype_emb_list\n #return torch.cat(prototype_emb_list, dim=0) #works for the new pytorch version\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_and_std_as_support(self, taskid, mnet_model):\n prototype_text = self.prototype_text_list[taskid]\n\n prototype_emb_list = []\n prototype_std_list = []\n for lab_id in range(len(prototype_text)):\n N = len(prototype_text[lab_id])\n prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in prototype_text[lab_id]),\n 
device=self.config.device, train=True)\n\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n mean_vec = torch.mean(prototype_matrix, dim=0)\n if N > 1:\n #std_val = torch.sqrt((torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1))\n std_val = (torch.pow(prototype_matrix, 2).sum() - N * torch.pow(mean_vec, 2).sum()) / (N - 1)\n std_val = Variable(std_val.data)\n else:\n std_val = Variable(torch.from_numpy(np.array([1.0]).astype(np.float32))).cuda()\n prototype_emb_list.append(mean_vec)\n prototype_std_list.append(std_val)\n #print prototype_std_list\n return torch.cat(prototype_emb_list, 0), torch.cat(prototype_std_list, 0)\n\n def get_average_as_support_sample(self, taskid, mnet_model, sample_per_class):\n prototype_text = self.prototype_text_list[taskid]\n\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n if sample_per_class > len(prototype_text[lab_id]):\n prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in prototype_text[lab_id]),\n device=self.config.device)\n else:\n top_ind = range(len(prototype_text[lab_id]))\n random.shuffle(top_ind)\n top_ind = top_ind[:sample_per_class]\n prototype_text_sample = [prototype_text[lab_id][i] for i in top_ind]\n prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in prototype_text_sample),\n device=self.config.device)\n\n prototype_matrix = mnet_model.get_hidden(prototype_sent)\n prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n return torch.cat(prototype_emb_list, 0)\n\n def get_average_as_support_large(self, taskid, mnet_model, batchsize):\n prototype_text = self.prototype_text_list[taskid]\n\n prototype_emb_list = []\n for lab_id in range(len(prototype_text)):\n num_batch = len(prototype_text[lab_id]) / batchsize\n if len(prototype_text[lab_id]) % batchsize != 0 and num_batch == 0:\n num_batch += 1\n lab_emb_sum = []\n for i in range(num_batch):\n #print i\n #print len(prototype_text[lab_id]), i*batchsize, (i+1) * batchsize\n batch_text = prototype_text[lab_id][i * batchsize : min((i+1) * batchsize, len(prototype_text[lab_id]))]\n #print batch_text\n len_text = len(batch_text)\n #print len_text\n batch_prototype_sent = self.TEXT.numericalize(\n self.TEXT.pad(x for x in batch_text),\n device=self.config.device, train=True)\n #print batch_prototype_sent\n prototype_matrix = mnet_model.get_hidden(batch_prototype_sent)\n prototype_matrix = Variable(prototype_matrix.data)\n\n #prototype_emb_list.append(torch.mean(prototype_matrix, dim=0))\n #prototype_emb_list.append(torch.sum(prototype_matrix, dim=0) / len_text)\n #break\n #TODO: the following three lines not equivalent to the two lines below\n # lab_emb_sum.append(torch.sum(prototype_matrix, dim=0))\n #lab_emb_sum = torch.sum( torch.cat(lab_emb_sum, 0), dim=0 )\n #lab_emb_sum /= len(prototype_text[lab_id])\n lab_emb_sum.append(torch.mean(prototype_matrix, dim=0))\n lab_emb_sum = torch.mean( torch.cat(lab_emb_sum, 0), dim=0 )\n prototype_emb_list.append(lab_emb_sum)\n return torch.cat(prototype_emb_list, 0)\n\n\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
from pyathena import connect
from Config import config2
from Config import merchants
def get_mapped_sku(sku):
try:
cursor = connect(aws_access_key_id=config2["aws_access_key_id"],
aws_secret_access_key=config2["aws_secret_access_key"],
s3_staging_dir=config2["s3_staging_dir"],
region_name=config2["region_name"]).cursor()
cursor.execute("SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s ",
{"sku": str(sku)})
# print(cursor.description)
result = cursor.fetchall()
for row in result:
return {'Cross-Reference No': row[0], 'brand': row[1]}
except Exception as e:
print(e)
return {}
return {}
def get_sku(seller_sku, sc_sku, seller):
try:
cursor = connect(aws_access_key_id=config2["aws_access_key_id"],
aws_secret_access_key=config2["aws_secret_access_key"],
s3_staging_dir=config2["s3_staging_dir"],
region_name=config2["region_name"]).cursor()
cursor.execute("SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s ",
{"sku": str(sku)})
# print(cursor.description)
# print(cursor.fetchall())
for row in cursor:
return (row[0])
except Exception as e:
print(e)
return False
return True
def add_sku(sc_sku, seller_sku, seller):
try:
cursor = connect(aws_access_key_id=config2["aws_access_key_id"],
aws_secret_access_key=config2["aws_secret_access_key"],
s3_staging_dir=config2["s3_staging_dir"],
region_name=config2["region_name"]).cursor()
cursor.execute("INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )",
{"scsku": str(sc_sku), "sellersku": str(seller_sku), "seller": str(seller)})
return (cursor.description)
# print(cursor.fetchall())
# for row in cursor:
# return (row[0])
except Exception as e:
print(e)
return False
return True
# print(add_sku('test', 'test', 'Adean'))
# result = (get_mapped_sku('HDS-3571'))
# print(result['Cross-Reference No'])
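# Hedged example (added; reuses the 'HDS-3571' sample SKU from the comments above and
# handles the empty dict that get_mapped_sku returns when nothing matches):
# mapped = get_mapped_sku('HDS-3571')
# if mapped:
#     print(mapped['Cross-Reference No'], mapped['brand'])
# else:
#     print('SKU not mapped in optivations.master_product_list')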
|
normal
|
{
"blob_id": "6add599035573842475c7f9155c5dbbea6c96a8a",
"index": 3618,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_mapped_sku(sku):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n result = cursor.fetchall()\n for row in result:\n return {'Cross-Reference No': row[0], 'brand': row[1]}\n except Exception as e:\n print(e)\n return {}\n return {}\n\n\ndef get_sku(seller_sku, sc_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n for row in cursor:\n return row[0]\n except Exception as e:\n print(e)\n return False\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_mapped_sku(sku):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n result = cursor.fetchall()\n for row in result:\n return {'Cross-Reference No': row[0], 'brand': row[1]}\n except Exception as e:\n print(e)\n return {}\n return {}\n\n\ndef get_sku(seller_sku, sc_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n for row in cursor:\n return row[0]\n except Exception as e:\n print(e)\n return False\n return True\n\n\ndef add_sku(sc_sku, seller_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )'\n , {'scsku': str(sc_sku), 'sellersku': str(seller_sku), 'seller':\n str(seller)})\n return cursor.description\n except Exception as e:\n print(e)\n return False\n return True\n",
"step-4": "from pyathena import connect\nfrom Config import config2\nfrom Config import merchants\n\n\ndef get_mapped_sku(sku):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n result = cursor.fetchall()\n for row in result:\n return {'Cross-Reference No': row[0], 'brand': row[1]}\n except Exception as e:\n print(e)\n return {}\n return {}\n\n\ndef get_sku(seller_sku, sc_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n for row in cursor:\n return row[0]\n except Exception as e:\n print(e)\n return False\n return True\n\n\ndef add_sku(sc_sku, seller_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )'\n , {'scsku': str(sc_sku), 'sellersku': str(seller_sku), 'seller':\n str(seller)})\n return cursor.description\n except Exception as e:\n print(e)\n return False\n return True\n",
"step-5": "from pyathena import connect\nfrom Config import config2\nfrom Config import merchants\n\n\ndef get_mapped_sku(sku):\n try:\n cursor = connect(aws_access_key_id=config2[\"aws_access_key_id\"],\n aws_secret_access_key=config2[\"aws_secret_access_key\"],\n s3_staging_dir=config2[\"s3_staging_dir\"],\n region_name=config2[\"region_name\"]).cursor()\n cursor.execute(\"SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s \",\n {\"sku\": str(sku)})\n\n # print(cursor.description)\n result = cursor.fetchall()\n for row in result:\n return {'Cross-Reference No': row[0], 'brand': row[1]}\n\n except Exception as e:\n print(e)\n return {}\n return {}\n\n\ndef get_sku(seller_sku, sc_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2[\"aws_access_key_id\"],\n aws_secret_access_key=config2[\"aws_secret_access_key\"],\n s3_staging_dir=config2[\"s3_staging_dir\"],\n region_name=config2[\"region_name\"]).cursor()\n cursor.execute(\"SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s \",\n {\"sku\": str(sku)})\n\n # print(cursor.description)\n # print(cursor.fetchall())\n for row in cursor:\n return (row[0])\n except Exception as e:\n print(e)\n return False\n return True\n\n\ndef add_sku(sc_sku, seller_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2[\"aws_access_key_id\"],\n aws_secret_access_key=config2[\"aws_secret_access_key\"],\n s3_staging_dir=config2[\"s3_staging_dir\"],\n region_name=config2[\"region_name\"]).cursor()\n cursor.execute(\"INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )\",\n {\"scsku\": str(sc_sku), \"sellersku\": str(seller_sku), \"seller\": str(seller)})\n\n return (cursor.description)\n # print(cursor.fetchall())\n # for row in cursor:\n # return (row[0])\n except Exception as e:\n print(e)\n return False\n return True\n# print(add_sku('test', 'test', 'Adean'))\n# result = (get_mapped_sku('HDS-3571'))\n# print(result['Cross-Reference No'])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import sys
import os
sys.path.append("C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive")
import normal_distribution_06
#import sampling_distributions_07
def lower_upper_confidence_intervals(avg, SD):
    #avg is x bar: the sample mean at the point of interest (e.g. the Bieber tweeter example)
    #SD is the standard error (standard deviation of the population divided by sqrt(number_in_sample))
    lower = avg - 2*SD
    upper = avg + 2*SD
    return (lower, upper)
#7. Quiz: Confidence Interval Bounds
print(lower_upper_confidence_intervals(40, 2.71))
#8. Quiz: Exact Z-Scores
#get_z_from_p is assumed to be provided by the imported normal_distribution_06 helper module
print(normal_distribution_06.get_z_from_p(0.975))
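#Worked check (added for clarity): with avg=40 and SD=2.71,
#lower = 40 - 2*2.71 = 34.58 and upper = 40 + 2*2.71 = 45.42,
#so the approximate 95% interval printed above is (34.58, 45.42).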
|
normal
|
{
"blob_id": "d423b0bc6cd9ea9795317750141ad5f5eab01636",
"index": 1886,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lower_upper_confidence_intervals(avg, SD):\n lower = avg - 2 * SD\n upper = avg + 2 * SD\n return lower, upper\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append(\n 'C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive')\n<mask token>\n\n\ndef lower_upper_confidence_intervals(avg, SD):\n lower = avg - 2 * SD\n upper = avg + 2 * SD\n return lower, upper\n\n\nprint(lower_upper_confidence_intervals(40, 2.71))\nprint(get_z_from_p(0.975))\n",
"step-4": "import sys\nimport os\nsys.path.append(\n 'C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive')\nimport normal_distribution_06\n\n\ndef lower_upper_confidence_intervals(avg, SD):\n lower = avg - 2 * SD\n upper = avg + 2 * SD\n return lower, upper\n\n\nprint(lower_upper_confidence_intervals(40, 2.71))\nprint(get_z_from_p(0.975))\n",
"step-5": "import sys\nimport os\nsys.path.append(\"C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive\")\nimport normal_distribution_06\n#import sampling_distributions_07\n\ndef lower_upper_confidence_intervals(avg, SD):\n #avg is x bar. The mean value at the \"would be\" point. ie Bieber Tweeter\n #SD is standard error (standard deviation of population dataset dvided by sqrt(number_in_sample)\n lower = avg-2*SD\n upper = avg+2*SD\n return((lower, upper))\n \n#7. Quiz: Confidence Interval Bounds\nprint(lower_upper_confidence_intervals(40, 2.71))\n\n#8. Quiz: Exact Z-Scores\nprint(get_z_from_p(0.975))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tkinter as tk
import tkinter.messagebox as tkmb
import psutil
import os
import re
import subprocess
from subprocess import Popen, PIPE, STDOUT, DEVNULL
import filecmp
import time
import threading
import datetime
debian = '/etc/debian_version'
redhat = '/etc/redhat-release'
def PrintaLog(texto):
t = time.time()
logtime= time.ctime(t)
stringprint = "%s %s\n" % (logtime, texto)
f = open("/var/log/patriot", "a")
f.write(stringprint)
f.flush()
f.close()
def PrintaMSG(texto):
command = 'python3 alertiqt.py "'+texto+'"'
processalert = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)
def TestIntegrity(File):
if os.path.exists(redhat) :
command = 'rpm -Vf "'+File+'"'
processrpm = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)
outputrpm = processrpm.communicate()[0]
if outputrpm :
return(1)
else:
return(0)
else :
commandDPKG = 'dpkg -S "'+File+'"'
processdpkg = subprocess.Popen([commandDPKG], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)
outputdpkg = processdpkg.communicate()[0]
if processdpkg.returncode == 1:
#dpkg is buggy to find package files
fixdpkgbug= re.sub('/usr', '', File)
commandDPKG2 = 'dpkg -S "'+fixdpkgbug+'"'
processdpkg2 = subprocess.Popen([commandDPKG2], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)
outputdpkg2 = processdpkg2.communicate()[0]
outputdpkg = outputdpkg2
if processdpkg2.returncode == 1:
return(1)
		# communicate() returns bytes under Python 3; decode before splitting off the package name
		packagename = outputdpkg.decode().split(":")
commandDEBSUM = 'dpkg --verify "'+packagename[0]+'"'
processdebsum = subprocess.Popen([commandDEBSUM], stdout=subprocess.PIPE,shell=True)
outputdebsum = processdebsum.communicate()[0]
print (outputdebsum)
if outputdebsum :
return(1)
else:
return(0)
def ScanUnsigned():
pidsinicial = psutil.pids()
while True:
pidsshots = psutil.pids()
s = set(pidsinicial)
newpids = [x for x in pidsshots if x not in s]
if newpids:
#print(newpids)
for i in newpids:
#print(i)
try:
p = psutil.Process(pid=i)
with p.oneshot():
integrity= TestIntegrity(p.exe())
#print (integrity)
pidproceso = p.pid
exeproceso = p.exe()
						# re.match expects (pattern, string): match the ausearch path against the new process' exe
						evadeau = bool(re.match("/usr/sbin/ausearch", exeproceso))
if integrity == 1 and evadeau == 0:
stringprint = "New process that not belongs to any package or package was modified: %i %s" % (pidproceso, exeproceso)
x = threading.Thread(target=PrintaMSG, args=(stringprint,))
x.setDaemon(True)
x.start()
PrintaLog(stringprint)
except Exception as e:
print (e)
pidsinicial = pidsshots
time.sleep(2)
def ScanConnections():
initialcon =psutil.net_connections()
netprocess =[]
for i in initialcon:
#print (i.pid)
p = psutil.Process(pid=i.pid)
with p.oneshot():
#print (p.exe())
netprocess.append(p.exe())
#print (netprocess)
while True:
runcon =psutil.net_connections()
netprocessrun =[]
for e in runcon:
#print (e.pid)
p = psutil.Process(pid=e.pid)
with p.oneshot():
#print (p.exe())
netprocessrun.append(p.exe())
#print (netprocessrun)
s = set(netprocess)
newpconprogs = [x for x in netprocessrun if x not in s]
if newpconprogs:
#print(newpconprogs)
for h in newpconprogs:
stringprint = "New Process initiating TCP/IP connection: %s" % h
x = threading.Thread(target=PrintaMSG, args=(stringprint,))
x.setDaemon(True)
x.start()
PrintaLog(stringprint)
netprocess.append(h)
time.sleep(2)
def AuSearch():
auparams = {"modules": "New module loaded in Kernel","code_injection": "DLL Inject","register_injection": "DLL Inject"}
while True:
tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)
timeraw = str(tomo.time().replace(second=0, microsecond=0))
for key in auparams.keys():
#print(key)
command = 'ausearch -k "'+key+'" --start "'+timeraw+'"'
processausearch = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)
outputausearch = processausearch.communicate()[0]
if outputausearch:
stringprint = "Audit Alert: %s" % auparams[key]
x = threading.Thread(target=PrintaMSG, args=(stringprint,))
x.setDaemon(True)
x.start()
PrintaLog(stringprint)
time.sleep(115)
def KeyBoardSearch():
command = "xinput --list"
keyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)
outputkeysearch= keyfirstcommand.communicate()[0]
while True:
keyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)
outputkeyrunsearch= keyruncommand.communicate()[0]
if outputkeyrunsearch != outputkeysearch:
stringprint = "New keyboard detected"
x = threading.Thread(target=PrintaMSG, args=(stringprint,))
x.setDaemon(True)
x.start()
PrintaLog(stringprint)
outputkeysearch = outputkeyrunsearch
time.sleep(60)
s = threading.Thread(target=KeyBoardSearch)
s.setDaemon(True)
s.start()
x = threading.Thread(target=ScanUnsigned)
x.setDaemon(True)
x.start()
y = threading.Thread(target=ScanConnections)
y.setDaemon(True)
y.start()
z = threading.Thread(target=AuSearch)
z.setDaemon(True)
z.start()
while True:
time.sleep(100)
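# Usage notes (added; not part of the original script):
# - AuSearch() assumes audit rules keyed "modules", "code_injection" and "register_injection"
#   were loaded beforehand, e.g. an illustrative rule: auditctl -w /sbin/insmod -p x -k modules
# - Root privileges are generally needed for ausearch, dpkg --verify / rpm -Vf, and for
#   psutil.net_connections() to see other users' connections on Linux.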
|
normal
|
{
"blob_id": "fde62dd3f5ee3cc0a1568b037ada14835c327046",
"index": 6298,
"step-1": "<mask token>\n\n\ndef PrintaLog(texto):\n t = time.time()\n logtime = time.ctime(t)\n stringprint = '%s %s\\n' % (logtime, texto)\n f = open('/var/log/patriot', 'a')\n f.write(stringprint)\n f.flush()\n f.close()\n\n\ndef PrintaMSG(texto):\n command = 'python3 alertiqt.py \"' + texto + '\"'\n processalert = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True, stderr=DEVNULL)\n\n\n<mask token>\n\n\ndef ScanConnections():\n initialcon = psutil.net_connections()\n netprocess = []\n for i in initialcon:\n p = psutil.Process(pid=i.pid)\n with p.oneshot():\n netprocess.append(p.exe())\n while True:\n runcon = psutil.net_connections()\n netprocessrun = []\n for e in runcon:\n p = psutil.Process(pid=e.pid)\n with p.oneshot():\n netprocessrun.append(p.exe())\n s = set(netprocess)\n newpconprogs = [x for x in netprocessrun if x not in s]\n if newpconprogs:\n for h in newpconprogs:\n stringprint = (\n 'New Process initiating TCP/IP connection: %s' % h)\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n netprocess.append(h)\n time.sleep(2)\n\n\ndef AuSearch():\n auparams = {'modules': 'New module loaded in Kernel', 'code_injection':\n 'DLL Inject', 'register_injection': 'DLL Inject'}\n while True:\n tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n timeraw = str(tomo.time().replace(second=0, microsecond=0))\n for key in auparams.keys():\n command = 'ausearch -k \"' + key + '\" --start \"' + timeraw + '\"'\n processausearch = subprocess.Popen([command], stdout=subprocess\n .PIPE, shell=True, stderr=DEVNULL)\n outputausearch = processausearch.communicate()[0]\n if outputausearch:\n stringprint = 'Audit Alert: %s' % auparams[key]\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n time.sleep(115)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef PrintaLog(texto):\n t = time.time()\n logtime = time.ctime(t)\n stringprint = '%s %s\\n' % (logtime, texto)\n f = open('/var/log/patriot', 'a')\n f.write(stringprint)\n f.flush()\n f.close()\n\n\ndef PrintaMSG(texto):\n command = 'python3 alertiqt.py \"' + texto + '\"'\n processalert = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True, stderr=DEVNULL)\n\n\n<mask token>\n\n\ndef ScanUnsigned():\n pidsinicial = psutil.pids()\n while True:\n pidsshots = psutil.pids()\n s = set(pidsinicial)\n newpids = [x for x in pidsshots if x not in s]\n if newpids:\n for i in newpids:\n try:\n p = psutil.Process(pid=i)\n with p.oneshot():\n integrity = TestIntegrity(p.exe())\n pidproceso = p.pid\n exeproceso = p.exe()\n evadeau = bool(re.match(exeproceso,\n '/usr/sbin/ausearch'))\n if integrity == 1 and evadeau == 0:\n stringprint = (\n 'New process that not belongs to any package or package was modified: %i %s'\n % (pidproceso, exeproceso))\n x = threading.Thread(target=PrintaMSG, args=(\n stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n except Exception as e:\n print(e)\n pidsinicial = pidsshots\n time.sleep(2)\n\n\ndef ScanConnections():\n initialcon = psutil.net_connections()\n netprocess = []\n for i in initialcon:\n p = psutil.Process(pid=i.pid)\n with p.oneshot():\n netprocess.append(p.exe())\n while True:\n runcon = psutil.net_connections()\n netprocessrun = []\n for e in runcon:\n p = psutil.Process(pid=e.pid)\n with p.oneshot():\n netprocessrun.append(p.exe())\n s = set(netprocess)\n newpconprogs = [x for x in netprocessrun if x not in s]\n if newpconprogs:\n for h in newpconprogs:\n stringprint = (\n 'New Process initiating TCP/IP connection: %s' % h)\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n netprocess.append(h)\n time.sleep(2)\n\n\ndef AuSearch():\n auparams = {'modules': 'New module loaded in Kernel', 'code_injection':\n 'DLL Inject', 'register_injection': 'DLL Inject'}\n while True:\n tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n timeraw = str(tomo.time().replace(second=0, microsecond=0))\n for key in auparams.keys():\n command = 'ausearch -k \"' + key + '\" --start \"' + timeraw + '\"'\n processausearch = subprocess.Popen([command], stdout=subprocess\n .PIPE, shell=True, stderr=DEVNULL)\n outputausearch = processausearch.communicate()[0]\n if outputausearch:\n stringprint = 'Audit Alert: %s' % auparams[key]\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n time.sleep(115)\n\n\ndef KeyBoardSearch():\n command = 'xinput --list'\n keyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeysearch = keyfirstcommand.communicate()[0]\n while True:\n keyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeyrunsearch = keyruncommand.communicate()[0]\n if outputkeyrunsearch != outputkeysearch:\n stringprint = 'New keyboard detected'\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n outputkeysearch = outputkeyrunsearch\n time.sleep(60)\n\n\n<mask token>\n",
"step-3": "<mask token>\ndebian = '/etc/debian_version'\nredhat = '/etc/redhat-release'\n\n\ndef PrintaLog(texto):\n t = time.time()\n logtime = time.ctime(t)\n stringprint = '%s %s\\n' % (logtime, texto)\n f = open('/var/log/patriot', 'a')\n f.write(stringprint)\n f.flush()\n f.close()\n\n\ndef PrintaMSG(texto):\n command = 'python3 alertiqt.py \"' + texto + '\"'\n processalert = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True, stderr=DEVNULL)\n\n\ndef TestIntegrity(File):\n if os.path.exists(redhat):\n command = 'rpm -Vf \"' + File + '\"'\n processrpm = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputrpm = processrpm.communicate()[0]\n if outputrpm:\n return 1\n else:\n return 0\n else:\n commandDPKG = 'dpkg -S \"' + File + '\"'\n processdpkg = subprocess.Popen([commandDPKG], stdout=subprocess.\n PIPE, shell=True, stderr=DEVNULL)\n outputdpkg = processdpkg.communicate()[0]\n if processdpkg.returncode == 1:\n fixdpkgbug = re.sub('/usr', '', File)\n commandDPKG2 = 'dpkg -S \"' + fixdpkgbug + '\"'\n processdpkg2 = subprocess.Popen([commandDPKG2], stdout=\n subprocess.PIPE, shell=True, stderr=DEVNULL)\n outputdpkg2 = processdpkg2.communicate()[0]\n outputdpkg = outputdpkg2\n if processdpkg2.returncode == 1:\n return 1\n packagename = outputdpkg.split(':')\n commandDEBSUM = 'dpkg --verify \"' + packagename[0] + '\"'\n processdebsum = subprocess.Popen([commandDEBSUM], stdout=subprocess\n .PIPE, shell=True)\n outputdebsum = processdebsum.communicate()[0]\n print(outputdebsum)\n if outputdebsum:\n return 1\n else:\n return 0\n\n\ndef ScanUnsigned():\n pidsinicial = psutil.pids()\n while True:\n pidsshots = psutil.pids()\n s = set(pidsinicial)\n newpids = [x for x in pidsshots if x not in s]\n if newpids:\n for i in newpids:\n try:\n p = psutil.Process(pid=i)\n with p.oneshot():\n integrity = TestIntegrity(p.exe())\n pidproceso = p.pid\n exeproceso = p.exe()\n evadeau = bool(re.match(exeproceso,\n '/usr/sbin/ausearch'))\n if integrity == 1 and evadeau == 0:\n stringprint = (\n 'New process that not belongs to any package or package was modified: %i %s'\n % (pidproceso, exeproceso))\n x = threading.Thread(target=PrintaMSG, args=(\n stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n except Exception as e:\n print(e)\n pidsinicial = pidsshots\n time.sleep(2)\n\n\ndef ScanConnections():\n initialcon = psutil.net_connections()\n netprocess = []\n for i in initialcon:\n p = psutil.Process(pid=i.pid)\n with p.oneshot():\n netprocess.append(p.exe())\n while True:\n runcon = psutil.net_connections()\n netprocessrun = []\n for e in runcon:\n p = psutil.Process(pid=e.pid)\n with p.oneshot():\n netprocessrun.append(p.exe())\n s = set(netprocess)\n newpconprogs = [x for x in netprocessrun if x not in s]\n if newpconprogs:\n for h in newpconprogs:\n stringprint = (\n 'New Process initiating TCP/IP connection: %s' % h)\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n netprocess.append(h)\n time.sleep(2)\n\n\ndef AuSearch():\n auparams = {'modules': 'New module loaded in Kernel', 'code_injection':\n 'DLL Inject', 'register_injection': 'DLL Inject'}\n while True:\n tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n timeraw = str(tomo.time().replace(second=0, microsecond=0))\n for key in auparams.keys():\n command = 'ausearch -k \"' + key + '\" --start \"' + timeraw + '\"'\n processausearch = subprocess.Popen([command], stdout=subprocess\n .PIPE, shell=True, 
stderr=DEVNULL)\n outputausearch = processausearch.communicate()[0]\n if outputausearch:\n stringprint = 'Audit Alert: %s' % auparams[key]\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n time.sleep(115)\n\n\ndef KeyBoardSearch():\n command = 'xinput --list'\n keyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeysearch = keyfirstcommand.communicate()[0]\n while True:\n keyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeyrunsearch = keyruncommand.communicate()[0]\n if outputkeyrunsearch != outputkeysearch:\n stringprint = 'New keyboard detected'\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n outputkeysearch = outputkeyrunsearch\n time.sleep(60)\n\n\ns = threading.Thread(target=KeyBoardSearch)\ns.setDaemon(True)\ns.start()\nx = threading.Thread(target=ScanUnsigned)\nx.setDaemon(True)\nx.start()\ny = threading.Thread(target=ScanConnections)\ny.setDaemon(True)\ny.start()\nz = threading.Thread(target=AuSearch)\nz.setDaemon(True)\nz.start()\nwhile True:\n time.sleep(100)\n",
"step-4": "import tkinter as tk\nimport tkinter.messagebox as tkmb\nimport psutil\nimport os\nimport re\nimport subprocess\nfrom subprocess import Popen, PIPE, STDOUT, DEVNULL\nimport filecmp\nimport re\nimport time\nimport threading\nimport datetime\nimport re\ndebian = '/etc/debian_version'\nredhat = '/etc/redhat-release'\n\n\ndef PrintaLog(texto):\n t = time.time()\n logtime = time.ctime(t)\n stringprint = '%s %s\\n' % (logtime, texto)\n f = open('/var/log/patriot', 'a')\n f.write(stringprint)\n f.flush()\n f.close()\n\n\ndef PrintaMSG(texto):\n command = 'python3 alertiqt.py \"' + texto + '\"'\n processalert = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True, stderr=DEVNULL)\n\n\ndef TestIntegrity(File):\n if os.path.exists(redhat):\n command = 'rpm -Vf \"' + File + '\"'\n processrpm = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputrpm = processrpm.communicate()[0]\n if outputrpm:\n return 1\n else:\n return 0\n else:\n commandDPKG = 'dpkg -S \"' + File + '\"'\n processdpkg = subprocess.Popen([commandDPKG], stdout=subprocess.\n PIPE, shell=True, stderr=DEVNULL)\n outputdpkg = processdpkg.communicate()[0]\n if processdpkg.returncode == 1:\n fixdpkgbug = re.sub('/usr', '', File)\n commandDPKG2 = 'dpkg -S \"' + fixdpkgbug + '\"'\n processdpkg2 = subprocess.Popen([commandDPKG2], stdout=\n subprocess.PIPE, shell=True, stderr=DEVNULL)\n outputdpkg2 = processdpkg2.communicate()[0]\n outputdpkg = outputdpkg2\n if processdpkg2.returncode == 1:\n return 1\n packagename = outputdpkg.split(':')\n commandDEBSUM = 'dpkg --verify \"' + packagename[0] + '\"'\n processdebsum = subprocess.Popen([commandDEBSUM], stdout=subprocess\n .PIPE, shell=True)\n outputdebsum = processdebsum.communicate()[0]\n print(outputdebsum)\n if outputdebsum:\n return 1\n else:\n return 0\n\n\ndef ScanUnsigned():\n pidsinicial = psutil.pids()\n while True:\n pidsshots = psutil.pids()\n s = set(pidsinicial)\n newpids = [x for x in pidsshots if x not in s]\n if newpids:\n for i in newpids:\n try:\n p = psutil.Process(pid=i)\n with p.oneshot():\n integrity = TestIntegrity(p.exe())\n pidproceso = p.pid\n exeproceso = p.exe()\n evadeau = bool(re.match(exeproceso,\n '/usr/sbin/ausearch'))\n if integrity == 1 and evadeau == 0:\n stringprint = (\n 'New process that not belongs to any package or package was modified: %i %s'\n % (pidproceso, exeproceso))\n x = threading.Thread(target=PrintaMSG, args=(\n stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n except Exception as e:\n print(e)\n pidsinicial = pidsshots\n time.sleep(2)\n\n\ndef ScanConnections():\n initialcon = psutil.net_connections()\n netprocess = []\n for i in initialcon:\n p = psutil.Process(pid=i.pid)\n with p.oneshot():\n netprocess.append(p.exe())\n while True:\n runcon = psutil.net_connections()\n netprocessrun = []\n for e in runcon:\n p = psutil.Process(pid=e.pid)\n with p.oneshot():\n netprocessrun.append(p.exe())\n s = set(netprocess)\n newpconprogs = [x for x in netprocessrun if x not in s]\n if newpconprogs:\n for h in newpconprogs:\n stringprint = (\n 'New Process initiating TCP/IP connection: %s' % h)\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n netprocess.append(h)\n time.sleep(2)\n\n\ndef AuSearch():\n auparams = {'modules': 'New module loaded in Kernel', 'code_injection':\n 'DLL Inject', 'register_injection': 'DLL Inject'}\n while True:\n tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n timeraw = 
str(tomo.time().replace(second=0, microsecond=0))\n for key in auparams.keys():\n command = 'ausearch -k \"' + key + '\" --start \"' + timeraw + '\"'\n processausearch = subprocess.Popen([command], stdout=subprocess\n .PIPE, shell=True, stderr=DEVNULL)\n outputausearch = processausearch.communicate()[0]\n if outputausearch:\n stringprint = 'Audit Alert: %s' % auparams[key]\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n time.sleep(115)\n\n\ndef KeyBoardSearch():\n command = 'xinput --list'\n keyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeysearch = keyfirstcommand.communicate()[0]\n while True:\n keyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeyrunsearch = keyruncommand.communicate()[0]\n if outputkeyrunsearch != outputkeysearch:\n stringprint = 'New keyboard detected'\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n outputkeysearch = outputkeyrunsearch\n time.sleep(60)\n\n\ns = threading.Thread(target=KeyBoardSearch)\ns.setDaemon(True)\ns.start()\nx = threading.Thread(target=ScanUnsigned)\nx.setDaemon(True)\nx.start()\ny = threading.Thread(target=ScanConnections)\ny.setDaemon(True)\ny.start()\nz = threading.Thread(target=AuSearch)\nz.setDaemon(True)\nz.start()\nwhile True:\n time.sleep(100)\n",
"step-5": "import tkinter as tk\nimport tkinter.messagebox as tkmb\nimport psutil\nimport os\nimport re\nimport subprocess\nfrom subprocess import Popen, PIPE, STDOUT, DEVNULL\nimport filecmp\nimport re\nimport time\nimport threading\nimport datetime\nimport re\n\ndebian = '/etc/debian_version'\nredhat = '/etc/redhat-release'\n\ndef PrintaLog(texto):\n\t\n\tt = time.time()\n\tlogtime= time.ctime(t)\n\t\n\tstringprint = \"%s %s\\n\" % (logtime, texto)\n\t\n\tf = open(\"/var/log/patriot\", \"a\")\n\tf.write(stringprint)\n\tf.flush()\n\tf.close()\n\ndef PrintaMSG(texto):\n\n\tcommand = 'python3 alertiqt.py \"'+texto+'\"' \n\t\t\t\t\t\n\tprocessalert = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)\n\ndef TestIntegrity(File):\n\t\n\tif os.path.exists(redhat) : \n\t\n\t\tcommand = 'rpm -Vf \"'+File+'\"' \n\t\t\t\t\t\n\t\tprocessrpm = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)\n\t\toutputrpm = processrpm.communicate()[0]\n\t\t\t\t\t\n\t\tif outputrpm :\n\t\t\t\t\t\t\n\t\t\treturn(1)\n\t\t\t\t\t\t\t\t\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn(0)\n\n\telse :\t\n\t\t\n\t\tcommandDPKG = 'dpkg -S \"'+File+'\"'\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\tprocessdpkg = subprocess.Popen([commandDPKG], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)\n\t\toutputdpkg = processdpkg.communicate()[0]\n\t\t\t\t\t\t\n\t\tif processdpkg.returncode == 1:\n\t\t\t\t\t\t\t\n\t\t\t#dpkg is buggy to find package files \n\t\t\t\t\t\t\t\n\t\t\tfixdpkgbug= re.sub('/usr', '', File)\n\t\t\t\t\t\t\t\n\t\t\tcommandDPKG2 = 'dpkg -S \"'+fixdpkgbug+'\"'\n\t\t\t\t\t\t\n\t\t\tprocessdpkg2 = subprocess.Popen([commandDPKG2], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)\n\t\t\toutputdpkg2 = processdpkg2.communicate()[0]\n\t\t\t\t\t\t\t\n\t\t\toutputdpkg = outputdpkg2\n\t\t\t\t\t\t\t\n\t\t\tif processdpkg2.returncode == 1:\n\t\t\t\t\t\t\t\n\t\t\t\treturn(1)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\n\t\tpackagename = outputdpkg.split(\":\")\n\t\t\t\t\t\t\n\t\tcommandDEBSUM = 'dpkg --verify \"'+packagename[0]+'\"'\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\tprocessdebsum = subprocess.Popen([commandDEBSUM], stdout=subprocess.PIPE,shell=True)\n\t\toutputdebsum = processdebsum.communicate()[0]\n\t\t\n\t\tprint (outputdebsum)\n\t\t\t\t\t\t\n\t\tif outputdebsum :\n\t\t\t\n\t\t\treturn(1)\n\t\t\t\t\t\t\n\t\telse:\n\t\t\treturn(0)\n\n\ndef ScanUnsigned():\n\t\n\tpidsinicial = psutil.pids()\n\n\twhile True:\n\t\n\t\tpidsshots = psutil.pids()\n\t\n\t\ts = set(pidsinicial)\n\t\tnewpids = [x for x in pidsshots if x not in s]\n\t\n\t\tif newpids:\n\t\n\t\t\t#print(newpids)\n\t\t\n\t\t\tfor i in newpids:\n\t\t\t\n\t\t\t\t#print(i)\n\t\t\t\ttry:\n\t\t\t\t\tp = psutil.Process(pid=i)\n\t\t\t\t\twith p.oneshot():\n\t\t\t\n\t\t\t\t\t\tintegrity= TestIntegrity(p.exe())\n\t\t\t\n\t\t\t\t\t\t#print (integrity)\n\t\t\t\t\t\t\n\t\t\t\t\t\tpidproceso = p.pid\n\t\t\t\t\t\texeproceso = p.exe()\n\t\t\t\t\t\t\n\t\t\t\t\t\tevadeau = bool(re.match(exeproceso, \"/usr/sbin/ausearch\"))\n\t\t\t\t\t\t\n\t\t\t\t\t\tif integrity == 1 and evadeau == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\t\tstringprint = \"New process that not belongs to any package or package was modified: %i %s\" % (pidproceso, exeproceso)\n\t\t\t\t\t\t\n\t\t\t\t\t\t\tx = threading.Thread(target=PrintaMSG, args=(stringprint,))\n\t\t\t\t\t\t\tx.setDaemon(True)\n\t\t\t\t\t\t\tx.start()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tPrintaLog(stringprint)\n\t\t\t\t\t\t\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint (e)\n\t\n\t\tpidsinicial = pidsshots\n\t\n\t\ttime.sleep(2)\n\t\t\n\ndef 
ScanConnections():\n\t\n\tinitialcon =psutil.net_connections()\n\n\tnetprocess =[]\n\n\tfor i in initialcon:\n\t\n\t\t#print (i.pid)\n\t\n\t\tp = psutil.Process(pid=i.pid)\n\t\n\t\twith p.oneshot():\n\t\t\n\t\t\t#print (p.exe())\n\t\t\n\t\t\tnetprocess.append(p.exe())\n\t\t\n\t#print (netprocess)\n\t\n\twhile True:\n\t\t\n\t\truncon =psutil.net_connections()\n\n\t\tnetprocessrun =[]\n\n\t\tfor e in runcon:\n\t\n\t\t\t#print (e.pid)\n\t\n\t\t\tp = psutil.Process(pid=e.pid)\n\t\n\t\t\twith p.oneshot():\n\t\t\n\t\t\t\t#print (p.exe())\n\t\t\n\t\t\t\tnetprocessrun.append(p.exe())\n\t\t\n\t\t#print (netprocessrun)\n\t\t\n\t\ts = set(netprocess)\n\t\tnewpconprogs = [x for x in netprocessrun if x not in s]\n\t\t\n\t\tif newpconprogs:\n\t\n\t\t\t#print(newpconprogs)\n\t\t\n\t\t\tfor h in newpconprogs:\n\t\t\t\t\n\t\t\t\tstringprint = \"New Process initiating TCP/IP connection: %s\" % h\n\t\t\t\t\t\t\n\t\t\t\tx = threading.Thread(target=PrintaMSG, args=(stringprint,))\n\t\t\t\tx.setDaemon(True)\n\t\t\t\tx.start()\n\t\t\t\t\n\t\t\t\tPrintaLog(stringprint)\n\t\t\t\t\n\t\t\t\tnetprocess.append(h)\n\t\t\n\t\t\t\t\n\t\ttime.sleep(2)\n\ndef AuSearch():\n\t\n\tauparams = {\"modules\": \"New module loaded in Kernel\",\"code_injection\": \"DLL Inject\",\"register_injection\": \"DLL Inject\"}\n\t\n\twhile True:\n\t\n\t\ttomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n\n\t\ttimeraw = str(tomo.time().replace(second=0, microsecond=0))\n\n\t\tfor key in auparams.keys():\n\t\t\t#print(key)\n\t\n\t\t\tcommand = 'ausearch -k \"'+key+'\" --start \"'+timeraw+'\"' \n\t\t\t\t\t\n\t\t\tprocessausearch = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)\n\t\t\toutputausearch = processausearch.communicate()[0]\n\t\n\t\t\tif outputausearch:\n\t\t\t\n\t\t\t\tstringprint = \"Audit Alert: %s\" % auparams[key]\n\t\t\t\t\t\t\n\t\t\t\tx = threading.Thread(target=PrintaMSG, args=(stringprint,))\n\t\t\t\tx.setDaemon(True)\n\t\t\t\tx.start()\n\t\t\t\n\t\t\t\tPrintaLog(stringprint)\n\t\n\t\ttime.sleep(115)\n\ndef KeyBoardSearch():\n\t\n\tcommand = \"xinput --list\" \n\t\n\tkeyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)\n\toutputkeysearch= keyfirstcommand.communicate()[0]\n\t\n\twhile True:\n\t\t\n\t\tkeyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)\n\t\toutputkeyrunsearch= keyruncommand.communicate()[0]\n\t\t\n\t\tif outputkeyrunsearch != outputkeysearch:\n\t\t\t\n\t\t\tstringprint = \"New keyboard detected\"\n\t\t\t\n\t\t\tx = threading.Thread(target=PrintaMSG, args=(stringprint,))\n\t\t\tx.setDaemon(True)\n\t\t\tx.start()\n\t\t\t\n\t\t\tPrintaLog(stringprint)\n\t\t\t\n\t\t\toutputkeysearch = outputkeyrunsearch\n\t\t\t\n\t\ttime.sleep(60)\n\t\t\t\n\t\ns = threading.Thread(target=KeyBoardSearch)\ns.setDaemon(True)\ns.start()\t\n\nx = threading.Thread(target=ScanUnsigned)\nx.setDaemon(True)\nx.start()\n\ny = threading.Thread(target=ScanConnections)\ny.setDaemon(True)\ny.start()\n\nz = threading.Thread(target=AuSearch)\nz.setDaemon(True)\nz.start()\n\nwhile True:\n\t\n\ttime.sleep(100)\n",
"step-ids": [
4,
6,
9,
10,
11
]
}
|
[
4,
6,
9,
10,
11
] |
import bpy
bl_info = {
"name": "Ratchets Center All Objects",
"author": "Ratchet3789",
"version": (0, 1, 0),
"description": "Centers all selected objects. Built for Game Development.",
"category": "Object",
}
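# Three operators are defined below: one zeroes the location of the selected objects,
# one snaps each object's origin to its geometry, and one does both.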
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "object.center_all_in_level" # unique identifier for buttons and menu items to reference.
bl_label = "Center Origin (Zero)" # display name in the interface.
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
# execute() is called by blender when running the operator.
def execute(self, context):
# The original script
for x in bpy.context.selected_objects:
x.location = (0, 0, 0)
# this lets blender know the operator finished successfully.
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = "object.snap_to_origin"
bl_label = "Center Mesh (Zero)"
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type="GEOMETRY_ORIGIN")
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = "object.absolute_center_all_in_level"
bl_label = "Center All (Zero)"
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type="GEOMETRY_ORIGIN")
x.location = (0, 0, 0)
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
# This allows you to run the script directly from Blender's text editor
# to test the addon without having to install it.
if __name__ == "__main__":
register()
|
normal
|
{
"blob_id": "f7a511beaea869cf32eb905a4f3685077297a5ec",
"index": 1654,
"step-1": "<mask token>\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.snap_to_origin'\n bl_label = 'Center Mesh (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n return {'FINISHED'}\n\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.absolute_center_all_in_level'\n bl_label = 'Center All (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n \"\"\"Center all objects script\"\"\"\n bl_idname = 'object.center_all_in_level'\n bl_label = 'Center Origin (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.snap_to_origin'\n bl_label = 'Center Mesh (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n return {'FINISHED'}\n\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.absolute_center_all_in_level'\n bl_label = 'Center All (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\ndef register():\n bpy.utils.register_class(CenterOriginToZero)\n bpy.utils.register_class(SnapMeshToOrigin)\n bpy.utils.register_class(AbsoluteCenterObjects)\n\n\ndef unregister():\n bpy.utils.unregister_class(CenterOriginToZero)\n bpy.utils.unregister_class(SnapMeshToOrigin)\n bpy.utils.unregister_class(AbsoluteCenterObjects)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n \"\"\"Center all objects script\"\"\"\n bl_idname = 'object.center_all_in_level'\n bl_label = 'Center Origin (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.snap_to_origin'\n bl_label = 'Center Mesh (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n return {'FINISHED'}\n\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.absolute_center_all_in_level'\n bl_label = 'Center All (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\ndef register():\n bpy.utils.register_class(CenterOriginToZero)\n bpy.utils.register_class(SnapMeshToOrigin)\n bpy.utils.register_class(AbsoluteCenterObjects)\n\n\ndef unregister():\n bpy.utils.unregister_class(CenterOriginToZero)\n bpy.utils.unregister_class(SnapMeshToOrigin)\n bpy.utils.unregister_class(AbsoluteCenterObjects)\n\n\nif __name__ == '__main__':\n register()\n",
"step-4": "<mask token>\nbl_info = {'name': 'Ratchets Center All Objects', 'author': 'Ratchet3789',\n 'version': (0, 1, 0), 'description':\n 'Centers all selected objects. Built for Game Development.', 'category':\n 'Object'}\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n \"\"\"Center all objects script\"\"\"\n bl_idname = 'object.center_all_in_level'\n bl_label = 'Center Origin (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.snap_to_origin'\n bl_label = 'Center Mesh (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n return {'FINISHED'}\n\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.absolute_center_all_in_level'\n bl_label = 'Center All (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\ndef register():\n bpy.utils.register_class(CenterOriginToZero)\n bpy.utils.register_class(SnapMeshToOrigin)\n bpy.utils.register_class(AbsoluteCenterObjects)\n\n\ndef unregister():\n bpy.utils.unregister_class(CenterOriginToZero)\n bpy.utils.unregister_class(SnapMeshToOrigin)\n bpy.utils.unregister_class(AbsoluteCenterObjects)\n\n\nif __name__ == '__main__':\n register()\n",
"step-5": "import bpy\nbl_info = {\n \"name\": \"Ratchets Center All Objects\",\n \"author\": \"Ratchet3789\",\n \"version\": (0, 1, 0),\n \"description\": \"Centers all selected objects. Built for Game Development.\",\n \"category\": \"Object\",\n}\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n \"\"\"Center all objects script\"\"\" # blender will use this as a tooltip for menu items and buttons.\n bl_idname = \"object.center_all_in_level\" # unique identifier for buttons and menu items to reference.\n bl_label = \"Center Origin (Zero)\"\t\t\t# display name in the interface.\n bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.\n\n # execute() is called by blender when running the operator.\n def execute(self, context):\n\n # The original script\n for x in bpy.context.selected_objects:\n x.location = (0, 0, 0)\n # this lets blender know the operator finished successfully.\n return {'FINISHED'}\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = \"object.snap_to_origin\"\n bl_label = \"Center Mesh (Zero)\"\n bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.\n\n def execute(self, context):\n \n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\")\n return {'FINISHED'}\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = \"object.absolute_center_all_in_level\"\n bl_label = \"Center All (Zero)\"\n bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\")\n x.location = (0, 0, 0)\n return {'FINISHED'}\n\n\ndef register():\n bpy.utils.register_class(CenterOriginToZero)\n bpy.utils.register_class(SnapMeshToOrigin)\n bpy.utils.register_class(AbsoluteCenterObjects)\n\ndef unregister():\n bpy.utils.unregister_class(CenterOriginToZero)\n bpy.utils.unregister_class(SnapMeshToOrigin)\n bpy.utils.unregister_class(AbsoluteCenterObjects)\n\n# This allows you to run the script directly from blenders text editor\n# to test the addon without having to install it.\nif __name__ == \"__main__\":\n register()\n",
"step-ids": [
10,
14,
15,
16,
18
]
}
|
[
10,
14,
15,
16,
18
] |
from rllab.envs.base import Env
from rllab.spaces import Discrete
from rllab.spaces import Box
from rllab.envs.base import Step
import numpy as np
import sys, pickle, os
sys.path.append(os.path.dirname(os.getcwd()))
from os.path import dirname
sys.path.append(dirname(dirname(dirname(os.getcwd()))))
from simulation import *
from scorer import *
from shapecloth import *
from tensioner import *
"""
A Rllab Environment for the tensioning policy experiments.
"""
class PinEnvDiscrete(Env):
MAPPING = {
0 : (0,0,0),
1 : (1,0,0),
2 : (0,1,0),
3 : (0,0,1),
4 : (-1,0,0),
5 : (0,-1,0),
6 : (0,0,-1)
}
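    # Discrete action -> unit tension direction: action 0 holds position,
    # actions 1-3 pull +x/+y/+z and 4-6 pull -x/-y/-z (applied via tensioner.tension in step()).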
def __init__(self, simulation, x, y, trajectory, scorer=0, max_displacement=False, predict=False, original=False, sample=False):
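        # simulation: cloth Simulation to control; (x, y): point on the cloth to pin;
        # trajectory: ordered scissor positions to cut along; scorer: index of the scoring function;
        # original/predict toggle how much of the state is exposed in the observation vector.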
self.simulation = simulation
height, width = simulation.cloth.initial_params[0]
self.os_dim = height * width * 5
self.simulation.reset()
self.tensioner = self.simulation.pin_position(x, y, max_displacement)
self.scorer = Scorer(scorer)
self.trajectory = trajectory
self.traj_index = 0
self.pinx, self.piny = x, y
self.predict = predict
self.original = original
self.sample = sample
@property
def observation_space(self):
if self.original:
return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement]),
high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]))
if not self.predict:
return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800]),
high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]
+ len(self.simulation.cloth.blobs) * [500, 500, 800]))
return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -1000, -1000, -1000, -3.2] + [0, 0]),
high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]
+ len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, 800, 800, 800, 3.2] + [600, 600]))
@property
def action_space(self):
return Discrete(7)
@property
def _state(self):
scissors = self.simulation.mouse.x, self.simulation.mouse.y
centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist()
if self.original:
return np.array([self.traj_index] + list(self.tensioner.displacement))
if not self.predict:
return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids)
next_position3 = [-1000, -1000]
closest_shape3 = [-1000, -1000]
angle3 = 0
next_position2 = [-1000, -1000]
closest_shape2 = [-1000, -1000]
angle2 = 0
next_position = [-1000, -1000]
closest_shape = [-1000, -1000]
angle = 0
if self.traj_index < len(self.trajectory) - 1:
next_position = [self.trajectory[self.traj_index+1][0], self.trajectory[self.traj_index+1][1]]
closest_shape = list(self.simulation.cloth.find_closest_shapept(next_position[0], next_position[1]))
angle = self.simulation.cloth.find_dtheta(scissors[0], scissors[1], next_position[0], next_position[1], closest_shape[0], closest_shape[1])
if self.traj_index < len(self.trajectory) - 5:
next_position2 = [self.trajectory[self.traj_index+5][0], self.trajectory[self.traj_index+5][1]]
if np.linalg.norm(np.array(next_position2) - np.array(next_position)) < 100:
closest_shape2 = list(self.simulation.cloth.find_closest_shapept(next_position2[0], next_position2[1]))
angle2 = self.simulation.cloth.find_dtheta(next_position[0], next_position[1], next_position2[0], next_position2[1], closest_shape2[0], closest_shape2[1])
if self.traj_index < len(self.trajectory) - 10:
next_position3 = [self.trajectory[self.traj_index+10][0], self.trajectory[self.traj_index+10][1]]
if np.linalg.norm(np.array(next_position3) - np.array(next_position2)) < 100:
closest_shape3 = list(self.simulation.cloth.find_closest_shapept(next_position3[0], next_position3[1]))
angle3 = self.simulation.cloth.find_dtheta(next_position2[0], next_position2[1], next_position3[0], next_position3[1], closest_shape3[0], closest_shape3[1])
return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids + next_position + closest_shape + [angle] + next_position2 + closest_shape2 + [angle2]
+ next_position3 + closest_shape3 + [angle3] + list(scissors))
@property
def _score(self):
disp = np.linalg.norm(self._state[1])
score = self.scorer.score(self.simulation.cloth)
if disp >= self.tensioner.max_displacement - 2:
score -= 100
return score
def reset(self):
self.simulation.reset()
self.tensioner = self.simulation.pin_position(self.pinx, self.piny, self.tensioner.max_displacement)
self.traj_index = 0
observation = np.copy(self._state)
return observation
def step(self, action):
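        # Translate the discrete action into a tension direction, advance the scissors two
        # trajectory points, and return the cloth evaluation as reward only once the cut is (almost) complete.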
x, y, z = self.MAPPING[action]
self.tensioner.tension(x, y, z)
self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])
reward = self.simulation.update() * self.traj_index/10
self.traj_index += 1
self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])
reward += self.simulation.update() * self.traj_index/10
done = self.traj_index >= len(self.trajectory) - 2
if done:
reward = self.simulation.cloth.evaluate()
else:
reward = 0
next_observation = np.copy(self._state)
self.traj_index += 1
return Step(observation=next_observation, reward=reward, done=done)
def render(self):
self.simulation.render_sim()
# def local_angles(self, n=5):
# if self.
# for i in range(n):
|
normal
|
{
"blob_id": "21974274b1e7800b83eb9582ab21714f04230549",
"index": 4299,
"step-1": "<mask token>\n\n\nclass PinEnvDiscrete(Env):\n <mask token>\n\n def __init__(self, simulation, x, y, trajectory, scorer=0,\n max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement]), high=np.array([len(self.trajectory) + 1,\n self.tensioner.max_displacement, self.tensioner.\n max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement] + len(self.simulation.cloth.blobs) * [0, \n 0, -800]), high=np.array([len(self.trajectory) + 1, self.\n tensioner.max_displacement, self.tensioner.max_displacement,\n self.tensioner.max_displacement] + len(self.simulation.\n cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self\n .tensioner.max_displacement, -self.tensioner.max_displacement] +\n len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -\n 1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.\n trajectory) + 1, self.tensioner.max_displacement, self.\n tensioner.max_displacement, self.tensioner.max_displacement] + \n len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, \n 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(\n )\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index + 1][0], self.\n trajectory[self.traj_index + 1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept\n (next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors\n [1], next_position[0], next_position[1], closest_shape[0],\n closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 = [self.trajectory[self.traj_index + 5][0],\n self.trajectory[self.traj_index + 5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(\n next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.\n find_closest_shapept(next_position2[0],\n next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position\n [0], next_position[1], next_position2[0],\n next_position2[1], closest_shape2[0], closest_shape2[1]\n )\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = 
[self.trajectory[self.traj_index +\n 10][0], self.trajectory[self.traj_index + 10][1]]\n if np.linalg.norm(np.array(next_position3) - np.\n array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.\n find_closest_shapept(next_position3[0],\n next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(\n next_position2[0], next_position2[1],\n next_position3[0], next_position3[1],\n closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids + next_position + closest_shape + [\n angle] + next_position2 + closest_shape2 + [angle2] +\n next_position3 + closest_shape3 + [angle3] + list(scissors))\n\n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny,\n self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index / 10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index / 10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PinEnvDiscrete(Env):\n <mask token>\n\n def __init__(self, simulation, x, y, trajectory, scorer=0,\n max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement]), high=np.array([len(self.trajectory) + 1,\n self.tensioner.max_displacement, self.tensioner.\n max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement] + len(self.simulation.cloth.blobs) * [0, \n 0, -800]), high=np.array([len(self.trajectory) + 1, self.\n tensioner.max_displacement, self.tensioner.max_displacement,\n self.tensioner.max_displacement] + len(self.simulation.\n cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self\n .tensioner.max_displacement, -self.tensioner.max_displacement] +\n len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -\n 1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.\n trajectory) + 1, self.tensioner.max_displacement, self.\n tensioner.max_displacement, self.tensioner.max_displacement] + \n len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, \n 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(\n )\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index + 1][0], self.\n trajectory[self.traj_index + 1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept\n (next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors\n [1], next_position[0], next_position[1], closest_shape[0],\n closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 = [self.trajectory[self.traj_index + 5][0],\n self.trajectory[self.traj_index + 5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(\n next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.\n find_closest_shapept(next_position2[0],\n next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position\n [0], next_position[1], next_position2[0],\n next_position2[1], closest_shape2[0], closest_shape2[1]\n )\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = 
[self.trajectory[self.traj_index +\n 10][0], self.trajectory[self.traj_index + 10][1]]\n if np.linalg.norm(np.array(next_position3) - np.\n array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.\n find_closest_shapept(next_position3[0],\n next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(\n next_position2[0], next_position2[1],\n next_position3[0], next_position3[1],\n closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids + next_position + closest_shape + [\n angle] + next_position2 + closest_shape2 + [angle2] +\n next_position3 + closest_shape3 + [angle3] + list(scissors))\n\n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny,\n self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index / 10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index / 10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n\n def render(self):\n self.simulation.render_sim()\n",
"step-3": "<mask token>\n\n\nclass PinEnvDiscrete(Env):\n MAPPING = {(0): (0, 0, 0), (1): (1, 0, 0), (2): (0, 1, 0), (3): (0, 0, \n 1), (4): (-1, 0, 0), (5): (0, -1, 0), (6): (0, 0, -1)}\n\n def __init__(self, simulation, x, y, trajectory, scorer=0,\n max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement]), high=np.array([len(self.trajectory) + 1,\n self.tensioner.max_displacement, self.tensioner.\n max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement] + len(self.simulation.cloth.blobs) * [0, \n 0, -800]), high=np.array([len(self.trajectory) + 1, self.\n tensioner.max_displacement, self.tensioner.max_displacement,\n self.tensioner.max_displacement] + len(self.simulation.\n cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self\n .tensioner.max_displacement, -self.tensioner.max_displacement] +\n len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -\n 1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.\n trajectory) + 1, self.tensioner.max_displacement, self.\n tensioner.max_displacement, self.tensioner.max_displacement] + \n len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, \n 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(\n )\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index + 1][0], self.\n trajectory[self.traj_index + 1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept\n (next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors\n [1], next_position[0], next_position[1], closest_shape[0],\n closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 = [self.trajectory[self.traj_index + 5][0],\n self.trajectory[self.traj_index + 5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(\n next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.\n find_closest_shapept(next_position2[0],\n next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position\n [0], next_position[1], next_position2[0],\n next_position2[1], 
closest_shape2[0], closest_shape2[1]\n )\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = [self.trajectory[self.traj_index +\n 10][0], self.trajectory[self.traj_index + 10][1]]\n if np.linalg.norm(np.array(next_position3) - np.\n array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.\n find_closest_shapept(next_position3[0],\n next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(\n next_position2[0], next_position2[1],\n next_position3[0], next_position3[1],\n closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids + next_position + closest_shape + [\n angle] + next_position2 + closest_shape2 + [angle2] +\n next_position3 + closest_shape3 + [angle3] + list(scissors))\n\n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny,\n self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index / 10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index / 10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n\n def render(self):\n self.simulation.render_sim()\n",
"step-4": "from rllab.envs.base import Env\nfrom rllab.spaces import Discrete\nfrom rllab.spaces import Box\nfrom rllab.envs.base import Step\nimport numpy as np\nimport sys, pickle, os\nsys.path.append(os.path.dirname(os.getcwd()))\nfrom os.path import dirname\nsys.path.append(dirname(dirname(dirname(os.getcwd()))))\nfrom simulation import *\nfrom scorer import *\nfrom shapecloth import *\nfrom tensioner import *\n<mask token>\n\n\nclass PinEnvDiscrete(Env):\n MAPPING = {(0): (0, 0, 0), (1): (1, 0, 0), (2): (0, 1, 0), (3): (0, 0, \n 1), (4): (-1, 0, 0), (5): (0, -1, 0), (6): (0, 0, -1)}\n\n def __init__(self, simulation, x, y, trajectory, scorer=0,\n max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement]), high=np.array([len(self.trajectory) + 1,\n self.tensioner.max_displacement, self.tensioner.\n max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -\n self.tensioner.max_displacement, -self.tensioner.\n max_displacement] + len(self.simulation.cloth.blobs) * [0, \n 0, -800]), high=np.array([len(self.trajectory) + 1, self.\n tensioner.max_displacement, self.tensioner.max_displacement,\n self.tensioner.max_displacement] + len(self.simulation.\n cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self\n .tensioner.max_displacement, -self.tensioner.max_displacement] +\n len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -\n 1000, -1000, -1000, -3.2] + [0, 0]), high=np.array([len(self.\n trajectory) + 1, self.tensioner.max_displacement, self.\n tensioner.max_displacement, self.tensioner.max_displacement] + \n len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, \n 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist(\n )\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index + 1][0], self.\n trajectory[self.traj_index + 1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept\n (next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors\n [1], next_position[0], next_position[1], closest_shape[0],\n closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 
= [self.trajectory[self.traj_index + 5][0],\n self.trajectory[self.traj_index + 5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(\n next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.\n find_closest_shapept(next_position2[0],\n next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position\n [0], next_position[1], next_position2[0],\n next_position2[1], closest_shape2[0], closest_shape2[1]\n )\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = [self.trajectory[self.traj_index +\n 10][0], self.trajectory[self.traj_index + 10][1]]\n if np.linalg.norm(np.array(next_position3) - np.\n array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.\n find_closest_shapept(next_position3[0],\n next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(\n next_position2[0], next_position2[1],\n next_position3[0], next_position3[1],\n closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.\n displacement) + centroids + next_position + closest_shape + [\n angle] + next_position2 + closest_shape2 + [angle2] +\n next_position3 + closest_shape3 + [angle3] + list(scissors))\n\n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny,\n self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index / 10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0],\n self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index / 10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n\n def render(self):\n self.simulation.render_sim()\n",
"step-5": "from rllab.envs.base import Env\nfrom rllab.spaces import Discrete\nfrom rllab.spaces import Box\nfrom rllab.envs.base import Step\nimport numpy as np\nimport sys, pickle, os\nsys.path.append(os.path.dirname(os.getcwd()))\nfrom os.path import dirname\nsys.path.append(dirname(dirname(dirname(os.getcwd()))))\nfrom simulation import *\nfrom scorer import *\nfrom shapecloth import *\nfrom tensioner import *\n\n\"\"\"\nA Rllab Environment for the tensioning policy experiments.\n\"\"\"\n\n\nclass PinEnvDiscrete(Env):\n\n MAPPING = {\n 0 : (0,0,0),\n 1 : (1,0,0),\n 2 : (0,1,0),\n 3 : (0,0,1),\n 4 : (-1,0,0),\n 5 : (0,-1,0),\n 6 : (0,0,-1)\n }\n\n def __init__(self, simulation, x, y, trajectory, scorer=0, max_displacement=False, predict=False, original=False, sample=False):\n self.simulation = simulation\n height, width = simulation.cloth.initial_params[0]\n self.os_dim = height * width * 5\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(x, y, max_displacement)\n self.scorer = Scorer(scorer)\n self.trajectory = trajectory\n self.traj_index = 0\n self.pinx, self.piny = x, y\n self.predict = predict\n self.original = original\n self.sample = sample\n\n @property\n def observation_space(self):\n if self.original:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement]),\n high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]))\n if not self.predict:\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800]),\n high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]\n + len(self.simulation.cloth.blobs) * [500, 500, 800]))\n return Box(low=np.array([0, -self.tensioner.max_displacement, -self.tensioner.max_displacement, -self.tensioner.max_displacement] + len(self.simulation.cloth.blobs) * [0, 0, -800] + 3 * [-1000, -1000, -1000, -1000, -3.2] + [0, 0]),\n high=np.array([len(self.trajectory) + 1, self.tensioner.max_displacement, self.tensioner.max_displacement, self.tensioner.max_displacement]\n + len(self.simulation.cloth.blobs) * [500, 500, 800] + 3 * [800, 800, 800, 800, 3.2] + [600, 600]))\n\n @property\n def action_space(self):\n return Discrete(7)\n\n\n @property\n def _state(self):\n scissors = self.simulation.mouse.x, self.simulation.mouse.y\n centroids = np.ravel(np.array(self.simulation.cloth.centroids)).tolist()\n if self.original:\n return np.array([self.traj_index] + list(self.tensioner.displacement))\n if not self.predict:\n return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids)\n next_position3 = [-1000, -1000]\n closest_shape3 = [-1000, -1000]\n angle3 = 0\n next_position2 = [-1000, -1000]\n closest_shape2 = [-1000, -1000]\n angle2 = 0\n next_position = [-1000, -1000]\n closest_shape = [-1000, -1000]\n angle = 0\n if self.traj_index < len(self.trajectory) - 1:\n next_position = [self.trajectory[self.traj_index+1][0], self.trajectory[self.traj_index+1][1]]\n closest_shape = list(self.simulation.cloth.find_closest_shapept(next_position[0], next_position[1]))\n angle = self.simulation.cloth.find_dtheta(scissors[0], scissors[1], next_position[0], next_position[1], closest_shape[0], closest_shape[1])\n if self.traj_index < len(self.trajectory) - 5:\n next_position2 = 
[self.trajectory[self.traj_index+5][0], self.trajectory[self.traj_index+5][1]]\n if np.linalg.norm(np.array(next_position2) - np.array(next_position)) < 100:\n closest_shape2 = list(self.simulation.cloth.find_closest_shapept(next_position2[0], next_position2[1]))\n angle2 = self.simulation.cloth.find_dtheta(next_position[0], next_position[1], next_position2[0], next_position2[1], closest_shape2[0], closest_shape2[1])\n if self.traj_index < len(self.trajectory) - 10:\n next_position3 = [self.trajectory[self.traj_index+10][0], self.trajectory[self.traj_index+10][1]]\n if np.linalg.norm(np.array(next_position3) - np.array(next_position2)) < 100:\n closest_shape3 = list(self.simulation.cloth.find_closest_shapept(next_position3[0], next_position3[1]))\n angle3 = self.simulation.cloth.find_dtheta(next_position2[0], next_position2[1], next_position3[0], next_position3[1], closest_shape3[0], closest_shape3[1])\n return np.array([self.traj_index] + list(self.tensioner.displacement) + centroids + next_position + closest_shape + [angle] + next_position2 + closest_shape2 + [angle2]\n + next_position3 + closest_shape3 + [angle3] + list(scissors))\n \n @property\n def _score(self):\n disp = np.linalg.norm(self._state[1])\n score = self.scorer.score(self.simulation.cloth)\n if disp >= self.tensioner.max_displacement - 2:\n score -= 100\n return score\n\n\n def reset(self):\n self.simulation.reset()\n self.tensioner = self.simulation.pin_position(self.pinx, self.piny, self.tensioner.max_displacement)\n self.traj_index = 0\n observation = np.copy(self._state)\n return observation\n\n def step(self, action):\n x, y, z = self.MAPPING[action]\n self.tensioner.tension(x, y, z)\n self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])\n reward = self.simulation.update() * self.traj_index/10\n self.traj_index += 1\n self.simulation.move_mouse(self.trajectory[self.traj_index][0], self.trajectory[self.traj_index][1])\n reward += self.simulation.update() * self.traj_index/10\n done = self.traj_index >= len(self.trajectory) - 2\n if done:\n reward = self.simulation.cloth.evaluate()\n else:\n reward = 0\n next_observation = np.copy(self._state)\n self.traj_index += 1\n return Step(observation=next_observation, reward=reward, done=done)\n\n def render(self):\n self.simulation.render_sim()\n\n # def local_angles(self, n=5):\n # if self.\n # for i in range(n):\n\n",
"step-ids": [
8,
9,
10,
12,
13
]
}
|
[
8,
9,
10,
12,
13
] |
#!/usr/bin/env python
"""Diverse wiskundige structuren weergeven in LaTeX in Jupyter Notebook."""
__author__ = "Brian van der Bijl"
__copyright__ = "Copyright 2020, Hogeschool Utrecht"
from IPython.display import display, Math, Markdown
import re
def show_num(x):
return re.compile(r"\.(?!\d)").sub("\1",x)
def latex_formula(form):
latex = form.simplify().to_latex(outer=True)
if latex:
display(Math(latex))
display(Markdown("<details><pre>$" + latex + "$</pre></details>"))
def latex_bmatrix(M, label=None): # Based on https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix
if len(M.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(M).replace("[", "").replace("]", "").splitlines()
if label:
result = [label + " = "]
else:
result = [""]
result += [r"\begin{bmatrix}"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\" for l in lines]
result += [r"\end{bmatrix}"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_amatrix(M, labels=None):
if len(M.shape) > 2:
raise ValueError('array can at most display two dimensions')
lines = str(M).replace("[", "").replace("]", "").splitlines()
if labels and len(labels) == 2:
result = [r"(\mathbf{" + labels[0] + r"} | \vec " + labels[1] + ") = "]
else:
result = [""]
result += [r"\left[\begin{array}{ccc|c}"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\" for l in lines]
result += [r"\end{array}\right]"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_msquare(sq):
if sq.shape != (3,3):
raise ValueError('Geen magisch vierkant')
lines = str(sq).replace("[", "").replace("]", "").splitlines()
result = [r"\begin{array}{|c|c|c|}\hline"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\\hline" for l in lines]
result += [r"\end{array}"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_ratio(x):
"""Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string
geconverteerd."""
if isinstance(x, int):
return str(x)
else:
        n, d = x.as_integer_ratio() # Pull the sign out in front of the fraction
return ("-" if n < 0 else "") + r"\frac{" + str(abs(n)) + "}{" + str(d) + "}"
def latex_polynomial(poly):
    terms, label, var, primes = poly # Unpack the parameters from the tuple
def power(exp):
"""Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.
In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd."""
if exp is 1:
return var
elif exp is 0:
return ""
else:
return (var + r"^{" + latex_ratio(exp) + "}")
    # Print f(x) with the correct number of primes
result = label + ("^{" + r"\prime"*primes + "}" if primes > 0 else "") + "(" + var + ") = "
    first = True # After the first term, a "+" must separate the terms
    for k, v in reversed(sorted(terms.items())): # For each term, from large (high exponent) to small
        if v > 0 and not first: # Join with a plus, unless it is the first term
            result += "+"
        elif v < 0: # Join with a minus when the term is negative, including for the first term
            result += "-"
        if v != 0: # Set first to False after the first term
            first = False
        if k == 0:
            result += str(v)
        elif abs(v) == 1: # Print x instead of 1x and -x instead of -1x
            result += str(power(k))
        elif v != 0: # Print every term that is not 0 or 1 in the usual way, without the minus sign,
            result += latex_ratio(abs(v)) + str(power(k)) # since that was already placed above
display(Math(result))
display(Markdown("<details><pre>$" + result + "$</pre></details>"))
|
normal
|
{
"blob_id": "7f7bd2e9ec1932ccfd8aa900956ce85473ee8dbd",
"index": 4668,
"step-1": "<mask token>\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\n<mask token>\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-3": "<mask token>\n\n\ndef show_num(x):\n return re.compile('\\\\.(?!\\\\d)').sub('\\x01', x)\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\ndef latex_bmatrix(M, label=None):\n if len(M.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if label:\n result = [label + ' = ']\n else:\n result = ['']\n result += ['\\\\begin{bmatrix}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{bmatrix}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-4": "<mask token>\n__author__ = 'Brian van der Bijl'\n__copyright__ = 'Copyright 2020, Hogeschool Utrecht'\nfrom IPython.display import display, Math, Markdown\nimport re\n\n\ndef show_num(x):\n return re.compile('\\\\.(?!\\\\d)').sub('\\x01', x)\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\ndef latex_bmatrix(M, label=None):\n if len(M.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if label:\n result = [label + ' = ']\n else:\n result = ['']\n result += ['\\\\begin{bmatrix}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{bmatrix}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-5": "#!/usr/bin/env python\r\n\r\n\"\"\"Diverse wiskundige structuren weergeven in LaTeX in Jupyter Notebook.\"\"\"\r\n\r\n__author__ = \"Brian van der Bijl\"\r\n__copyright__ = \"Copyright 2020, Hogeschool Utrecht\"\r\n\r\nfrom IPython.display import display, Math, Markdown\r\nimport re\r\n\r\ndef show_num(x):\r\n return re.compile(r\"\\.(?!\\d)\").sub(\"\\1\",x)\r\n\r\ndef latex_formula(form):\r\n latex = form.simplify().to_latex(outer=True)\r\n if latex:\r\n display(Math(latex))\r\n display(Markdown(\"<details><pre>$\" + latex + \"$</pre></details>\"))\r\n\r\ndef latex_bmatrix(M, label=None): # Gebaseerd op https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix\r\n if len(M.shape) > 2:\r\n raise ValueError('bmatrix can at most display two dimensions')\r\n lines = str(M).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n if label:\r\n result = [label + \" = \"]\r\n else:\r\n result = [\"\"]\r\n result += [r\"\\begin{bmatrix}\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\" for l in lines]\r\n result += [r\"\\end{bmatrix}\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_amatrix(M, labels=None):\r\n if len(M.shape) > 2:\r\n raise ValueError('array can at most display two dimensions')\r\n lines = str(M).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n if labels and len(labels) == 2:\r\n result = [r\"(\\mathbf{\" + labels[0] + r\"} | \\vec \" + labels[1] + \") = \"]\r\n else:\r\n result = [\"\"]\r\n result += [r\"\\left[\\begin{array}{ccc|c}\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\" for l in lines]\r\n result += [r\"\\end{array}\\right]\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_msquare(sq):\r\n if sq.shape != (3,3):\r\n raise ValueError('Geen magisch vierkant')\r\n lines = str(sq).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n result = [r\"\\begin{array}{|c|c|c|}\\hline\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\\hline\" for l in lines]\r\n result += [r\"\\end{array}\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_ratio(x):\r\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\r\n geconverteerd.\"\"\"\r\n if isinstance(x, int):\r\n return str(x)\r\n else:\r\n n, d = x.as_integer_ratio() # Nul buiten de breuk halen\r\n return (\"-\" if n < 0 else \"\") + r\"\\frac{\" + str(abs(n)) + \"}{\" + str(d) + \"}\"\r\n\r\ndef latex_polynomial(poly):\r\n terms, label, var, primes = poly # Bind parameters uit tuple\r\n\r\n def power(exp):\r\n \"\"\"Print een term (e.g. x^2). 
x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\r\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\r\n if exp is 1:\r\n return var\r\n elif exp is 0:\r\n return \"\"\r\n else:\r\n return (var + r\"^{\" + latex_ratio(exp) + \"}\")\r\n\r\n # Print f(x) met het juiste aantal primes \r\n result = label + (\"^{\" + r\"\\prime\"*primes + \"}\" if primes > 0 else \"\") + \"(\" + var + \") = \"\r\n first = True # Na de eerste moet er \"+\" tussen de termen komen\r\n\r\n for k, v in reversed(sorted(terms.items())): # Voor iedere term, van groot (hoog exponent) naar klein\r\n if v > 0 and not first: # Koppel met een plus, tenzij het de eerste term is\r\n result += \"+\"\r\n elif v < 0: # Koppel met een min als de term negatief is, ook de eerste term\r\n result += \"-\"\r\n\r\n if v != 0: # Zet first op False na de eerste keer\r\n first = False\r\n\r\n if k is 0:\r\n result += str(v)\r\n elif abs(v) is 1: # Print x in plaats van 1x en -x in plaats van -1x\r\n result += str(power(k))\r\n elif v != 0: # Print iedere term die niet 0 of 1 is op de gebruikelijke manier, zonder min want die staat\r\n result += latex_ratio(abs(v)) + str(power(k)) # erboven al\r\n\r\n display(Math(result))\r\n display(Markdown(\"<details><pre>$\" + result + \"$</pre></details>\"))\r\n",
"step-ids": [
1,
5,
7,
9,
10
]
}
|
[
1,
5,
7,
9,
10
] |
import re
import requests
def download_image(url: str) -> bool:
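    """Download every image referenced by <img> tags on the page at `url`.
    The page is fetched with requests; image URLs are pulled out with a regex and each
    image is written to a local file named after its URL (scheme stripped). Returns
    False if the initial page request does not return HTTP 200, True otherwise.
    """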
img_tag_regex = r"""<img.*?src="(.*?)"[^\>]+>"""
response = requests.get(url)
if response.status_code != 200:
return False
text = response.text
image_links = re.findall(img_tag_regex, text)
for link in image_links:
resp = requests.get(link)
with open(link.replace("https://", "").replace("http://", ""), "wb") as file:
file.write(resp.content)
return True
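# Minimal usage sketch (hypothetical URL). Note that each image is saved to a path
# derived from its URL, so any directories in that path must already exist locally.
if __name__ == "__main__":
    print(download_image("https://example.com/gallery.html"))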
|
normal
|
{
"blob_id": "268c36f6fb99383ea02b7ee406189ffb467d246c",
"index": 6554,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download_image(url: str) ->bool:\n img_tag_regex = '<img.*?src=\"(.*?)\"[^\\\\>]+>'\n response = requests.get(url)\n if response.status_code != 200:\n return False\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace('https://', '').replace('http://', ''), 'wb'\n ) as file:\n file.write(resp.content)\n return True\n",
"step-3": "import re\nimport requests\n\n\ndef download_image(url: str) ->bool:\n img_tag_regex = '<img.*?src=\"(.*?)\"[^\\\\>]+>'\n response = requests.get(url)\n if response.status_code != 200:\n return False\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace('https://', '').replace('http://', ''), 'wb'\n ) as file:\n file.write(resp.content)\n return True\n",
"step-4": "import re\n\nimport requests\n\n\ndef download_image(url: str) -> bool:\n img_tag_regex = r\"\"\"<img.*?src=\"(.*?)\"[^\\>]+>\"\"\"\n\n response = requests.get(url)\n if response.status_code != 200:\n return False\n\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace(\"https://\", \"\").replace(\"http://\", \"\"), \"wb\") as file:\n file.write(resp.content)\n\n return True\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__version__ = '0.90.03'
|
normal
|
{
"blob_id": "284e4f79748c17d44518f2ce424db5b1697373dc",
"index": 3156,
"step-1": "<mask token>\n",
"step-2": "__version__ = '0.90.03'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# Dependancies
import pandas as pd
# We can use the read_html function in Pandas
# to automatically scrape any tabular data from a page.
# URL of website to scrape
url = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'
# Read HTML
tables = pd.read_html(url)
tables
# What we get in return is a list of dataframes for any tabular data that Pandas found.
# We can slice off any of those dataframes that we want using normal indexing.
# Select first table as df
df = tables[0]
# Establish columns
df.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital',
'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan',
'Metropolitan Population', 'Population Rank', 'Notes']
# Display
df.head()
# Cleanup of extra rows
df = df.iloc[2:]
df.head()
# Set the index to the State column
df.set_index('State', inplace=True)
df.head()
# That way we can display all info about a row
df.loc['Alabama']
# Pandas also had a to_html method that we can use to generate HTML tables from DataFrames.
html_table = df.to_html()
html_table
# You may have to strip unwanted newlines to clean up the table.
html_table.replace('\n', '')
# You can also save the table directly to a file.
df.to_html('table.html')
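# Optional follow-up (illustrative): persist the newline-stripped HTML string as well;
# 'clean_table.html' is an arbitrary file name chosen for this sketch.
with open('clean_table.html', 'w') as f:
    f.write(html_table.replace('\n', ''))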
|
normal
|
{
"blob_id": "f4fca5ce20db0e27da11d76a7a2fd402c33d2e92",
"index": 4731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntables\n<mask token>\ndf.head()\n<mask token>\ndf.head()\ndf.set_index('State', inplace=True)\ndf.head()\ndf.loc['Alabama']\n<mask token>\nhtml_table\nhtml_table.replace('\\n', '')\ndf.to_html('table.html')\n",
"step-3": "<mask token>\nurl = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'\ntables = pd.read_html(url)\ntables\ndf = tables[0]\ndf.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital',\n 'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan',\n 'Metropolitan Population', 'Population Rank', 'Notes']\ndf.head()\ndf = df.iloc[2:]\ndf.head()\ndf.set_index('State', inplace=True)\ndf.head()\ndf.loc['Alabama']\nhtml_table = df.to_html()\nhtml_table\nhtml_table.replace('\\n', '')\ndf.to_html('table.html')\n",
"step-4": "import pandas as pd\nurl = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'\ntables = pd.read_html(url)\ntables\ndf = tables[0]\ndf.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital',\n 'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan',\n 'Metropolitan Population', 'Population Rank', 'Notes']\ndf.head()\ndf = df.iloc[2:]\ndf.head()\ndf.set_index('State', inplace=True)\ndf.head()\ndf.loc['Alabama']\nhtml_table = df.to_html()\nhtml_table\nhtml_table.replace('\\n', '')\ndf.to_html('table.html')\n",
"step-5": "# Dependancies\nimport pandas as pd\n\n# We can use the read_html function in Pandas \n# to automatically scrape any tabular data from a page.\n\n# URL of website to scrape\nurl = 'https://en.wikipedia.org/wiki/List_of_capitals_in_the_United_States'\n\n# Read HTML\ntables = pd.read_html(url)\ntables\n\n# What we get in return is a list of dataframes for any tabular data that Pandas found.\n# We can slice off any of those dataframes that we want using normal indexing.\n\n# Select first table as df\ndf = tables[0]\n\n# Establish columns\ndf.columns = ['State', 'Abr.', 'State-hood Rank', 'Capital', \n 'Capital Since', 'Area (sq-mi)', 'Municipal Population', 'Metropolitan', \n 'Metropolitan Population', 'Population Rank', 'Notes']\n# Display\ndf.head()\n\n# Cleanup of extra rows\ndf = df.iloc[2:]\ndf.head()\n\n# Set the index to the State column\ndf.set_index('State', inplace=True)\ndf.head()\n\n# That way we can display all info about a row\ndf.loc['Alabama']\n\n\n# Pandas also had a to_html method that we can use to generate HTML tables from DataFrames.\nhtml_table = df.to_html()\nhtml_table\n\n# You may have to strip unwanted newlines to clean up the table.\nhtml_table.replace('\\n', '')\n\n# You can also save the table directly to a file.\ndf.to_html('table.html')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
sys.path.append("..\\Pole_IA_Systemes_Experts")
from tkinter import *
from Knowledge_base.Facts import Fact
from Knowledge_base.Rules import Rule
from Backward.Explanation_tree import *
def ask_about_fact(fact: Fact):
"""
    Asks the user whether a fact is true or false through an interface provided by tkinter
Args:
fact (Fact): the fact we want to know about
Returns:
bool: true if the fact is true, false otherwise
"""
window = Tk()
window.title("Question !")
Label(window, text=fact.description, font=("Arial", 18)).grid(padx="1c", pady=("0.5c", "1c"), columnspan=3)
def fact_is_true():
global boolean
boolean = True
window.quit()
window.destroy()
def fact_not_true():
global boolean
boolean = False
window.quit()
window.destroy()
Button(window, text="Vrai", fg="green", command=fact_is_true, width="15") \
.grid(column=0, row=1, padx="0.5c", pady="0.5c")
Button(window, text="Ne sais pas", fg="black", command=fact_not_true, width="15") \
.grid(column=1, row=1, padx="0.5c", pady="0.5c")
Button(window, text="Faux", fg="red", command=fact_not_true, width="15") \
.grid(column=2, row=1, padx="0.5c", pady="0.5c")
window.mainloop()
try:
return boolean
except NameError:
return False
def show_result(goal: Fact, description: str, true_fact: bool, facts: list, used_rules: list):
"""
Displays the result of the inference engine and the explanation of the facts and rules used to reach this conclusion
Args:
goal (Fact): the fact understudy
description (String): the explanation of the rules and facts used
true_fact (bool): is True if the goal is verified, False otherwise
facts (list[fact]): list of the known facts
used_rules (list[Rule]): list of the rules that have been used
"""
root = Tk()
root.title("Résultat !")
if true_fact:
Label(root, text=goal.description, font=("Arial", 18)) \
.grid(padx="1c", pady="1c")
Label(root, text="car {}".format(description), font=("Arial", 10)) \
.grid(row=1, padx="1c", pady="1c")
else:
Label(root, text="Impossible à dire", font=("Arial", 18)) \
.grid(padx="1c", pady="1c")
display_explanation_tree(facts, used_rules, root)
root.mainloop()
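# Illustrative flow (comments only, since Fact's constructor is not shown here):
# a backward-chaining engine would call ask_about_fact(fact) for every unknown fact,
# collect the verified facts and the fired rules, and finally call
# show_result(goal, description, true_fact, facts, used_rules) to present the outcome
# together with the explanation tree.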
|
normal
|
{
"blob_id": "4dae34b7c90f52314aac5e457addb3700ffcbd28",
"index": 9156,
"step-1": "<mask token>\n\n\ndef ask_about_fact(fact: Fact):\n \"\"\"\n Asks the user about whether a fact is true or false threw an interface provided by tkinter\n Args:\n fact (Fact): the fact we want to know about\n\n Returns:\n bool: true if the fact is true, false otherwise\n \"\"\"\n window = Tk()\n window.title('Question !')\n Label(window, text=fact.description, font=('Arial', 18)).grid(padx='1c',\n pady=('0.5c', '1c'), columnspan=3)\n\n def fact_is_true():\n global boolean\n boolean = True\n window.quit()\n window.destroy()\n\n def fact_not_true():\n global boolean\n boolean = False\n window.quit()\n window.destroy()\n Button(window, text='Vrai', fg='green', command=fact_is_true, width='15'\n ).grid(column=0, row=1, padx='0.5c', pady='0.5c')\n Button(window, text='Ne sais pas', fg='black', command=fact_not_true,\n width='15').grid(column=1, row=1, padx='0.5c', pady='0.5c')\n Button(window, text='Faux', fg='red', command=fact_not_true, width='15'\n ).grid(column=2, row=1, padx='0.5c', pady='0.5c')\n window.mainloop()\n try:\n return boolean\n except NameError:\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ask_about_fact(fact: Fact):\n \"\"\"\n Asks the user about whether a fact is true or false threw an interface provided by tkinter\n Args:\n fact (Fact): the fact we want to know about\n\n Returns:\n bool: true if the fact is true, false otherwise\n \"\"\"\n window = Tk()\n window.title('Question !')\n Label(window, text=fact.description, font=('Arial', 18)).grid(padx='1c',\n pady=('0.5c', '1c'), columnspan=3)\n\n def fact_is_true():\n global boolean\n boolean = True\n window.quit()\n window.destroy()\n\n def fact_not_true():\n global boolean\n boolean = False\n window.quit()\n window.destroy()\n Button(window, text='Vrai', fg='green', command=fact_is_true, width='15'\n ).grid(column=0, row=1, padx='0.5c', pady='0.5c')\n Button(window, text='Ne sais pas', fg='black', command=fact_not_true,\n width='15').grid(column=1, row=1, padx='0.5c', pady='0.5c')\n Button(window, text='Faux', fg='red', command=fact_not_true, width='15'\n ).grid(column=2, row=1, padx='0.5c', pady='0.5c')\n window.mainloop()\n try:\n return boolean\n except NameError:\n return False\n\n\ndef show_result(goal: Fact, description: str, true_fact: bool, facts: list,\n used_rules: list):\n \"\"\"\n Displays the result of the inference engine and the explanation of the facts and rules used to reach this conclusion\n Args:\n goal (Fact): the fact understudy\n description (String): the explanation of the rules and facts used\n true_fact (bool): is True if the goal is verified, False otherwise\n facts (list[fact]): list of the known facts\n used_rules (list[Rule]): list of the rules that have been used\n \"\"\"\n root = Tk()\n root.title('Résultat !')\n if true_fact:\n Label(root, text=goal.description, font=('Arial', 18)).grid(padx=\n '1c', pady='1c')\n Label(root, text='car {}'.format(description), font=('Arial', 10)\n ).grid(row=1, padx='1c', pady='1c')\n else:\n Label(root, text='Impossible à dire', font=('Arial', 18)).grid(padx\n ='1c', pady='1c')\n display_explanation_tree(facts, used_rules, root)\n root.mainloop()\n",
"step-3": "<mask token>\nsys.path.append('..\\\\Pole_IA_Systemes_Experts')\n<mask token>\n\n\ndef ask_about_fact(fact: Fact):\n \"\"\"\n Asks the user about whether a fact is true or false threw an interface provided by tkinter\n Args:\n fact (Fact): the fact we want to know about\n\n Returns:\n bool: true if the fact is true, false otherwise\n \"\"\"\n window = Tk()\n window.title('Question !')\n Label(window, text=fact.description, font=('Arial', 18)).grid(padx='1c',\n pady=('0.5c', '1c'), columnspan=3)\n\n def fact_is_true():\n global boolean\n boolean = True\n window.quit()\n window.destroy()\n\n def fact_not_true():\n global boolean\n boolean = False\n window.quit()\n window.destroy()\n Button(window, text='Vrai', fg='green', command=fact_is_true, width='15'\n ).grid(column=0, row=1, padx='0.5c', pady='0.5c')\n Button(window, text='Ne sais pas', fg='black', command=fact_not_true,\n width='15').grid(column=1, row=1, padx='0.5c', pady='0.5c')\n Button(window, text='Faux', fg='red', command=fact_not_true, width='15'\n ).grid(column=2, row=1, padx='0.5c', pady='0.5c')\n window.mainloop()\n try:\n return boolean\n except NameError:\n return False\n\n\ndef show_result(goal: Fact, description: str, true_fact: bool, facts: list,\n used_rules: list):\n \"\"\"\n Displays the result of the inference engine and the explanation of the facts and rules used to reach this conclusion\n Args:\n goal (Fact): the fact understudy\n description (String): the explanation of the rules and facts used\n true_fact (bool): is True if the goal is verified, False otherwise\n facts (list[fact]): list of the known facts\n used_rules (list[Rule]): list of the rules that have been used\n \"\"\"\n root = Tk()\n root.title('Résultat !')\n if true_fact:\n Label(root, text=goal.description, font=('Arial', 18)).grid(padx=\n '1c', pady='1c')\n Label(root, text='car {}'.format(description), font=('Arial', 10)\n ).grid(row=1, padx='1c', pady='1c')\n else:\n Label(root, text='Impossible à dire', font=('Arial', 18)).grid(padx\n ='1c', pady='1c')\n display_explanation_tree(facts, used_rules, root)\n root.mainloop()\n",
"step-4": "import sys\nsys.path.append('..\\\\Pole_IA_Systemes_Experts')\nfrom tkinter import *\nfrom Knowledge_base.Facts import Fact\nfrom Knowledge_base.Rules import Rule\nfrom Backward.Explanation_tree import *\n\n\ndef ask_about_fact(fact: Fact):\n \"\"\"\n Asks the user about whether a fact is true or false threw an interface provided by tkinter\n Args:\n fact (Fact): the fact we want to know about\n\n Returns:\n bool: true if the fact is true, false otherwise\n \"\"\"\n window = Tk()\n window.title('Question !')\n Label(window, text=fact.description, font=('Arial', 18)).grid(padx='1c',\n pady=('0.5c', '1c'), columnspan=3)\n\n def fact_is_true():\n global boolean\n boolean = True\n window.quit()\n window.destroy()\n\n def fact_not_true():\n global boolean\n boolean = False\n window.quit()\n window.destroy()\n Button(window, text='Vrai', fg='green', command=fact_is_true, width='15'\n ).grid(column=0, row=1, padx='0.5c', pady='0.5c')\n Button(window, text='Ne sais pas', fg='black', command=fact_not_true,\n width='15').grid(column=1, row=1, padx='0.5c', pady='0.5c')\n Button(window, text='Faux', fg='red', command=fact_not_true, width='15'\n ).grid(column=2, row=1, padx='0.5c', pady='0.5c')\n window.mainloop()\n try:\n return boolean\n except NameError:\n return False\n\n\ndef show_result(goal: Fact, description: str, true_fact: bool, facts: list,\n used_rules: list):\n \"\"\"\n Displays the result of the inference engine and the explanation of the facts and rules used to reach this conclusion\n Args:\n goal (Fact): the fact understudy\n description (String): the explanation of the rules and facts used\n true_fact (bool): is True if the goal is verified, False otherwise\n facts (list[fact]): list of the known facts\n used_rules (list[Rule]): list of the rules that have been used\n \"\"\"\n root = Tk()\n root.title('Résultat !')\n if true_fact:\n Label(root, text=goal.description, font=('Arial', 18)).grid(padx=\n '1c', pady='1c')\n Label(root, text='car {}'.format(description), font=('Arial', 10)\n ).grid(row=1, padx='1c', pady='1c')\n else:\n Label(root, text='Impossible à dire', font=('Arial', 18)).grid(padx\n ='1c', pady='1c')\n display_explanation_tree(facts, used_rules, root)\n root.mainloop()\n",
"step-5": "import sys\n\nsys.path.append(\"..\\\\Pole_IA_Systemes_Experts\")\nfrom tkinter import *\nfrom Knowledge_base.Facts import Fact\nfrom Knowledge_base.Rules import Rule\nfrom Backward.Explanation_tree import *\n\n\ndef ask_about_fact(fact: Fact):\n \"\"\"\n Asks the user about whether a fact is true or false threw an interface provided by tkinter\n Args:\n fact (Fact): the fact we want to know about\n\n Returns:\n bool: true if the fact is true, false otherwise\n \"\"\"\n \n window = Tk()\n window.title(\"Question !\")\n \n Label(window, text=fact.description, font=(\"Arial\", 18)).grid(padx=\"1c\", pady=(\"0.5c\", \"1c\"), columnspan=3)\n \n def fact_is_true():\n global boolean\n boolean = True\n window.quit()\n window.destroy()\n \n def fact_not_true():\n global boolean\n boolean = False\n window.quit()\n window.destroy()\n \n Button(window, text=\"Vrai\", fg=\"green\", command=fact_is_true, width=\"15\") \\\n .grid(column=0, row=1, padx=\"0.5c\", pady=\"0.5c\")\n \n Button(window, text=\"Ne sais pas\", fg=\"black\", command=fact_not_true, width=\"15\") \\\n .grid(column=1, row=1, padx=\"0.5c\", pady=\"0.5c\")\n \n Button(window, text=\"Faux\", fg=\"red\", command=fact_not_true, width=\"15\") \\\n .grid(column=2, row=1, padx=\"0.5c\", pady=\"0.5c\")\n \n window.mainloop()\n \n try:\n return boolean\n except NameError:\n return False\n\n\ndef show_result(goal: Fact, description: str, true_fact: bool, facts: list, used_rules: list):\n \"\"\"\n Displays the result of the inference engine and the explanation of the facts and rules used to reach this conclusion\n Args:\n goal (Fact): the fact understudy\n description (String): the explanation of the rules and facts used\n true_fact (bool): is True if the goal is verified, False otherwise\n facts (list[fact]): list of the known facts\n used_rules (list[Rule]): list of the rules that have been used\n \"\"\"\n root = Tk()\n root.title(\"Résultat !\")\n \n if true_fact:\n Label(root, text=goal.description, font=(\"Arial\", 18)) \\\n .grid(padx=\"1c\", pady=\"1c\")\n \n Label(root, text=\"car {}\".format(description), font=(\"Arial\", 10)) \\\n .grid(row=1, padx=\"1c\", pady=\"1c\")\n \n else:\n Label(root, text=\"Impossible à dire\", font=(\"Arial\", 18)) \\\n .grid(padx=\"1c\", pady=\"1c\")\n \n display_explanation_tree(facts, used_rules, root)\n \n root.mainloop()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#MenuTitle: Check for open paths in selected glyphs
"""
Checks for open paths in selected glyphs (or all glyphs if no selection).
Output appears in Macro Window (Option-Command-M).
"""
# FIXME: test with masters and instances -- may not work
Font = Glyphs.font
Doc = Glyphs.currentDocument
selectedGlyphs = [ x.parent for x in Doc.selectedLayers() ]
selectedNames = [ x.name for x in selectedGlyphs ]
nopenpaths = 0
checkedGlyphs = []
print "Font: ", Font.familyName
if not selectedGlyphs:
selectedGlyphs = Font.glyphs
selectedNames = "all glyphs."
for glyph in selectedGlyphs:
# assumption: glyph layer 0 without paths means glyph doesn't have any drawing in it, yet
if glyph.layers[0].paths:
checkedGlyphs.append(glyph.name)
layers = glyph.layers
for layer in layers:
paths = layer.paths
for path in paths:
if not path.closed:
print "OPEN PATH: %s (%s)" % (layer.parent.name, layer.parent.unicode), "[layer: %s]" % layer.name, path
nopenpaths += 1
if not nopenpaths:
print "No open paths in %d glyphs:" % len(checkedGlyphs), checkedGlyphs
else:
print "Total open paths: %d out of %d checked glyphs." % (nopenpaths, len(checkedGlyphs))
|
normal
|
{
"blob_id": "bf49893fee79b0c3e34340cf1633c1797ce1bf41",
"index": 2282,
"step-1": "#MenuTitle: Check for open paths in selected glyphs\n\"\"\"\nChecks for open paths in selected glyphs (or all glyphs if no selection).\nOutput appears in Macro Window (Option-Command-M).\n\"\"\"\n# FIXME: test with masters and instances -- may not work\n\nFont = Glyphs.font\nDoc = Glyphs.currentDocument\nselectedGlyphs = [ x.parent for x in Doc.selectedLayers() ]\nselectedNames = [ x.name for x in selectedGlyphs ]\nnopenpaths = 0\ncheckedGlyphs = []\nprint \"Font: \", Font.familyName\nif not selectedGlyphs:\n\tselectedGlyphs = Font.glyphs\n\tselectedNames = \"all glyphs.\"\nfor glyph in selectedGlyphs:\n\t# assumption: glyph layer 0 without paths means glyph doesn't have any drawing in it, yet\n\tif glyph.layers[0].paths:\n\t\tcheckedGlyphs.append(glyph.name)\n\t\tlayers = glyph.layers\t\n\t\tfor layer in layers:\n\t\t\tpaths = layer.paths\n\t\t\tfor path in paths:\n\t\t\t\tif not path.closed:\n\t\t\t\t\tprint \"OPEN PATH: %s (%s)\" % (layer.parent.name, layer.parent.unicode), \"[layer: %s]\" % layer.name, path\n\t\t\t\t\tnopenpaths += 1\nif not nopenpaths:\n\tprint \"No open paths in %d glyphs:\" % len(checkedGlyphs), checkedGlyphs\nelse:\n\tprint \"Total open paths: %d out of %d checked glyphs.\" % (nopenpaths, len(checkedGlyphs))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from rlbot.agents.base_agent import BaseAgent, GameTickPacket, SimpleControllerState
#from rlbot.utils.structures.game_data_struct import GameTickPacket
from Decisions.challengeGame import ChallengeGame
from Decisions.info import MyInfo, Car
from Decisions.strat import Strategy
from Drawing.Drawing import DrawingTool
from util.vec import Vec3
from Actions.Kickoff import kickoff
from Actions.Chase import chase
# Blue team's (0) goal is located at (0, -5120)
# Orange (1) at (0, 5120)
# ball R = 92
from rlbot.messages.flat.QuickChatSelection import QuickChatSelection
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.ball_prediction_analysis import find_slice_at_time
from util.boost_pad_tracker import BoostPadTracker
from util.drive import steer_toward_target
from util.sequence import Sequence, ControlStep
from util.vec import Vec3
import math
import time
from math import radians
kickoff_location = Vec3(0, 0, 0)
class MyBot(BaseAgent):
    def __init__(self, name, team, index):
        super().__init__(name, team, index)
        self.info = None  # MyInfo, set in initialize_agent
        self.car = None  # Car, set in initialize_agent
        self.boost_pad_tracker = BoostPadTracker()
        self.strat = None  # Strategy, set in initialize_agent
        self.action = None  # current action/maneuver being executed
def initialize_agent(self):
# Set up information about the boost pads now that the game is active and the info is available
self.boost_pad_tracker.initialize_boosts(self.get_field_info())
self.info = MyInfo(self.team, self.index)
self.strat = Strategy(self.info)
self.car = Car()
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
"""
This function will be called by the framework many times per second. This is where you can
see the motion of the ball, etc. and return controls to drive your car.
"""
# Keep our boost pad info updated with which pads are currently active
self.boost_pad_tracker.update_boost_status(packet)
#self.info = self.info.read_packet(packet)
self.car.updateCar(packet, self.index)
self.info.read_packet(packet, self.get_ball_prediction_struct().slices)
#print("in main target: {}".format(self.get_ball_prediction_struct().slices[0].physics.location))
#self.renderer.draw_line_3d(self.car.loc, target_location, self.renderer.white())
#self.renderer.draw_rect_3d(target_location, 8, 8, True, self.renderer.cyan(), centered=True)
#cg = ChallengeGame(self.car, bp_struct)
#print(cg.get_time_to_loc(cg.challenge_loc))
# This is good to keep at the beginning of get_output. It will allow you to continue
# any sequences that you may have started during a previous call to get_output.
if self.action is None:
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
print(controls.steer)
return controls
self.renderer.draw_string_3d(self.car.loc, 1, 1, f'Speed: {self.car.vel.length():.1f}', self.renderer.white())
if self.action.name:
self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1, 1, self.action.name, self.renderer.white())
if packet.game_info.is_kickoff_pause and not isinstance(self.action, kickoff):
#self.logger.info(self.action)
self.action = kickoff(self.car.loc)
#print("Sequence is: {}".format(self.action))
#print("Sequence finished: {}".format(self.action.done))
controls = self.action.tick(self.info)
return controls
if self.action and not self.action.done:
controls = self.action.tick(self.info)
#print("action is: {}".format(self.action.name))
if controls is not None:
return controls
elif self.action.done:
print("choosing new action")
self.action = self.strat.chooseAction(self.info)
controls = self.action.tick(self.info)
return controls
# Gather some information about our car and the ball
ball_location = Vec3(packet.game_ball.physics.location)
if self.car.loc.dist(ball_location) > 1500:
# We're far away from the ball, let's try to lead it a little bit
ball_prediction = self.get_ball_prediction_struct() # This can predict bounces, etc
ball_in_future = find_slice_at_time(ball_prediction, packet.game_info.seconds_elapsed + 2)
target_location = Vec3(ball_in_future.physics.location)
self.renderer.draw_line_3d(ball_location, target_location, self.renderer.cyan())
else:
target_location = ball_location
# Draw some things to help understand what the bot is thinking
#self.renderer.draw_string_2d(100, 100, 1, 1, f'Ball at: {ball_location}', self.renderer.white())
'''
if 750 < self.car.vel.length() < 800:
# We'll do a front flip if the car is moving at a certain speed.
return self.begin_front_flip(packet)
#controls = self.action.controls
controls = SimpleControllerState()
controls.steer = steer_toward_target(self.car, target_location)
controls.throttle = 1.0
# You can set more controls if you want, like controls.boost.
'''
print("the fuck we doin here?!?!?!?")
return controls
def begin_front_flip(self, packet):
# Send some quickchat just for fun
self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection.Information_IGotIt)
# Do a front flip. We will be committed to this for a few seconds and the bot will ignore other
# logic during that time because we are setting the action.
self.action = Sequence([
ControlStep(duration=0.05, controls=SimpleControllerState(jump=True)),
ControlStep(duration=0.05, controls=SimpleControllerState(jump=False)),
ControlStep(duration=0.2, controls=SimpleControllerState(jump=True, pitch=-1)),
ControlStep(duration=0.8, controls=SimpleControllerState()),
])
# Return the controls associated with the beginning of the sequence so we can start right away.
return self.action.tick(packet)
def is_kickoff(self, ball_location, ball_velocity):
#self.logger.info(ball_location.flat() == kickoff_location)
#self.logger.info(ball_velocity.length() == 0)
return ball_location.flat() == kickoff_location and ball_velocity.length() == 0
'''
class Bot(BaseAgent):
DEVMODE = True
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.info: GameInfo = None
self.draw: DrawingTool = None
self.strat: Strategy = None
self.car = None
self.Actions: Maneuver = None
self.controls: SimpleControllerState = SimpleControllerState()
def initialize_agent(self):
#self.logger.info(rlutilities.__file__)
self.info = GameInfo(self.team)
#for field in self.info._fields_:
# print(field[0], getattr(self.info, field[0]))
self.info.set_mode("soccar")
self.draw = DrawingTool(self.renderer)
self.car = self.info.cars[self.index]
self.logger.info("my index is {}".format(self.index))
self.strat = Strategy(self.info, my_car)
def get_output(self, packet: GameTickPacket):
# Update game data variables
if self.tick_counter < 20:
self.tick_counter += 1
return Input()
if self.Actions is None and not self.Actions.finished:
controls = self.Action.tick(packet)
self.info.read_packet(packet, self.get_field_info(), self.get_ball_path())
self.draw.draw_path(self.get_ball_path())
challenge = ChallengeGame(self.info.cars[self.index], self.info.ball_path)
if challenge.should_go:
self.Action = self.strat.chooseAction(challenge, self.info.ball_path)
self.controls = self.Action.controls
print(self.Action)
if self.info.is_kickoff():
return self.do
self.controls = self.action.doThing(self.info)
if self.DEVMODE:
self.Action.render(self.draw)
challenge.render(self.draw)
return self.controls
def get_ball_path(self):
ball_prediction = self.get_ball_prediction_struct()
path = []
for i in range(0, ball_prediction.num_slices):
prediction_slice = ball_prediction.slices[i]
loc = prediction_slice.physics.location
path.append(loc)
return path
'''
|
normal
|
{
"blob_id": "1a0d4e77f09b4ce752631ae36a83ff57f96b89b1",
"index": 600,
"step-1": "<mask token>\n\n\nclass MyBot(BaseAgent):\n <mask token>\n\n def initialize_agent(self):\n self.boost_pad_tracker.initialize_boosts(self.get_field_info())\n self.info = MyInfo(self.team, self.index)\n self.strat = Strategy(self.info)\n self.car = Car()\n\n def get_output(self, packet: GameTickPacket) ->SimpleControllerState:\n \"\"\"\n This function will be called by the framework many times per second. This is where you can\n see the motion of the ball, etc. and return controls to drive your car.\n \"\"\"\n self.boost_pad_tracker.update_boost_status(packet)\n self.car.updateCar(packet, self.index)\n self.info.read_packet(packet, self.get_ball_prediction_struct().slices)\n if self.action is None:\n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n print(controls.steer)\n return controls\n self.renderer.draw_string_3d(self.car.loc, 1, 1,\n f'Speed: {self.car.vel.length():.1f}', self.renderer.white())\n if self.action.name:\n self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1, \n 1, self.action.name, self.renderer.white())\n if packet.game_info.is_kickoff_pause and not isinstance(self.action,\n kickoff):\n self.action = kickoff(self.car.loc)\n controls = self.action.tick(self.info)\n return controls\n if self.action and not self.action.done:\n controls = self.action.tick(self.info)\n if controls is not None:\n return controls\n elif self.action.done:\n print('choosing new action')\n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n return controls\n ball_location = Vec3(packet.game_ball.physics.location)\n if self.car.loc.dist(ball_location) > 1500:\n ball_prediction = self.get_ball_prediction_struct()\n ball_in_future = find_slice_at_time(ball_prediction, packet.\n game_info.seconds_elapsed + 2)\n target_location = Vec3(ball_in_future.physics.location)\n self.renderer.draw_line_3d(ball_location, target_location, self\n .renderer.cyan())\n else:\n target_location = ball_location\n \"\"\"\n if 750 < self.car.vel.length() < 800:\n # We'll do a front flip if the car is moving at a certain speed.\n return self.begin_front_flip(packet)\n \n #controls = self.action.controls\n controls = SimpleControllerState()\n controls.steer = steer_toward_target(self.car, target_location)\n controls.throttle = 1.0\n # You can set more controls if you want, like controls.boost.\n \"\"\"\n print('the fuck we doin here?!?!?!?')\n return controls\n\n def begin_front_flip(self, packet):\n self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection\n .Information_IGotIt)\n self.action = Sequence([ControlStep(duration=0.05, controls=\n SimpleControllerState(jump=True)), ControlStep(duration=0.05,\n controls=SimpleControllerState(jump=False)), ControlStep(\n duration=0.2, controls=SimpleControllerState(jump=True, pitch=-\n 1)), ControlStep(duration=0.8, controls=SimpleControllerState())])\n return self.action.tick(packet)\n\n def is_kickoff(self, ball_location, ball_velocity):\n return ball_location.flat(\n ) == kickoff_location and ball_velocity.length() == 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyBot(BaseAgent):\n\n def __init__(self, name, team, index):\n super().__init__(name, team, index)\n self.action: Action = kickoff\n self.info: GameInfo = None\n self.car: Car = None\n self.boost_pad_tracker = BoostPadTracker()\n self.stat: Strategy = None\n self.action: Action = None\n\n def initialize_agent(self):\n self.boost_pad_tracker.initialize_boosts(self.get_field_info())\n self.info = MyInfo(self.team, self.index)\n self.strat = Strategy(self.info)\n self.car = Car()\n\n def get_output(self, packet: GameTickPacket) ->SimpleControllerState:\n \"\"\"\n This function will be called by the framework many times per second. This is where you can\n see the motion of the ball, etc. and return controls to drive your car.\n \"\"\"\n self.boost_pad_tracker.update_boost_status(packet)\n self.car.updateCar(packet, self.index)\n self.info.read_packet(packet, self.get_ball_prediction_struct().slices)\n if self.action is None:\n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n print(controls.steer)\n return controls\n self.renderer.draw_string_3d(self.car.loc, 1, 1,\n f'Speed: {self.car.vel.length():.1f}', self.renderer.white())\n if self.action.name:\n self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1, \n 1, self.action.name, self.renderer.white())\n if packet.game_info.is_kickoff_pause and not isinstance(self.action,\n kickoff):\n self.action = kickoff(self.car.loc)\n controls = self.action.tick(self.info)\n return controls\n if self.action and not self.action.done:\n controls = self.action.tick(self.info)\n if controls is not None:\n return controls\n elif self.action.done:\n print('choosing new action')\n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n return controls\n ball_location = Vec3(packet.game_ball.physics.location)\n if self.car.loc.dist(ball_location) > 1500:\n ball_prediction = self.get_ball_prediction_struct()\n ball_in_future = find_slice_at_time(ball_prediction, packet.\n game_info.seconds_elapsed + 2)\n target_location = Vec3(ball_in_future.physics.location)\n self.renderer.draw_line_3d(ball_location, target_location, self\n .renderer.cyan())\n else:\n target_location = ball_location\n \"\"\"\n if 750 < self.car.vel.length() < 800:\n # We'll do a front flip if the car is moving at a certain speed.\n return self.begin_front_flip(packet)\n \n #controls = self.action.controls\n controls = SimpleControllerState()\n controls.steer = steer_toward_target(self.car, target_location)\n controls.throttle = 1.0\n # You can set more controls if you want, like controls.boost.\n \"\"\"\n print('the fuck we doin here?!?!?!?')\n return controls\n\n def begin_front_flip(self, packet):\n self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection\n .Information_IGotIt)\n self.action = Sequence([ControlStep(duration=0.05, controls=\n SimpleControllerState(jump=True)), ControlStep(duration=0.05,\n controls=SimpleControllerState(jump=False)), ControlStep(\n duration=0.2, controls=SimpleControllerState(jump=True, pitch=-\n 1)), ControlStep(duration=0.8, controls=SimpleControllerState())])\n return self.action.tick(packet)\n\n def is_kickoff(self, ball_location, ball_velocity):\n return ball_location.flat(\n ) == kickoff_location and ball_velocity.length() == 0\n\n\n<mask token>\n",
"step-3": "<mask token>\nkickoff_location = Vec3(0, 0, 0)\n\n\nclass MyBot(BaseAgent):\n\n def __init__(self, name, team, index):\n super().__init__(name, team, index)\n self.action: Action = kickoff\n self.info: GameInfo = None\n self.car: Car = None\n self.boost_pad_tracker = BoostPadTracker()\n self.stat: Strategy = None\n self.action: Action = None\n\n def initialize_agent(self):\n self.boost_pad_tracker.initialize_boosts(self.get_field_info())\n self.info = MyInfo(self.team, self.index)\n self.strat = Strategy(self.info)\n self.car = Car()\n\n def get_output(self, packet: GameTickPacket) ->SimpleControllerState:\n \"\"\"\n This function will be called by the framework many times per second. This is where you can\n see the motion of the ball, etc. and return controls to drive your car.\n \"\"\"\n self.boost_pad_tracker.update_boost_status(packet)\n self.car.updateCar(packet, self.index)\n self.info.read_packet(packet, self.get_ball_prediction_struct().slices)\n if self.action is None:\n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n print(controls.steer)\n return controls\n self.renderer.draw_string_3d(self.car.loc, 1, 1,\n f'Speed: {self.car.vel.length():.1f}', self.renderer.white())\n if self.action.name:\n self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1, \n 1, self.action.name, self.renderer.white())\n if packet.game_info.is_kickoff_pause and not isinstance(self.action,\n kickoff):\n self.action = kickoff(self.car.loc)\n controls = self.action.tick(self.info)\n return controls\n if self.action and not self.action.done:\n controls = self.action.tick(self.info)\n if controls is not None:\n return controls\n elif self.action.done:\n print('choosing new action')\n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n return controls\n ball_location = Vec3(packet.game_ball.physics.location)\n if self.car.loc.dist(ball_location) > 1500:\n ball_prediction = self.get_ball_prediction_struct()\n ball_in_future = find_slice_at_time(ball_prediction, packet.\n game_info.seconds_elapsed + 2)\n target_location = Vec3(ball_in_future.physics.location)\n self.renderer.draw_line_3d(ball_location, target_location, self\n .renderer.cyan())\n else:\n target_location = ball_location\n \"\"\"\n if 750 < self.car.vel.length() < 800:\n # We'll do a front flip if the car is moving at a certain speed.\n return self.begin_front_flip(packet)\n \n #controls = self.action.controls\n controls = SimpleControllerState()\n controls.steer = steer_toward_target(self.car, target_location)\n controls.throttle = 1.0\n # You can set more controls if you want, like controls.boost.\n \"\"\"\n print('the fuck we doin here?!?!?!?')\n return controls\n\n def begin_front_flip(self, packet):\n self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection\n .Information_IGotIt)\n self.action = Sequence([ControlStep(duration=0.05, controls=\n SimpleControllerState(jump=True)), ControlStep(duration=0.05,\n controls=SimpleControllerState(jump=False)), ControlStep(\n duration=0.2, controls=SimpleControllerState(jump=True, pitch=-\n 1)), ControlStep(duration=0.8, controls=SimpleControllerState())])\n return self.action.tick(packet)\n\n def is_kickoff(self, ball_location, ball_velocity):\n return ball_location.flat(\n ) == kickoff_location and ball_velocity.length() == 0\n\n\n<mask token>\n",
"step-4": "from rlbot.agents.base_agent import BaseAgent, GameTickPacket, SimpleControllerState\nfrom Decisions.challengeGame import ChallengeGame\nfrom Decisions.info import MyInfo, Car\nfrom Decisions.strat import Strategy\nfrom Drawing.Drawing import DrawingTool\nfrom util.vec import Vec3\nfrom Actions.Kickoff import kickoff\nfrom Actions.Chase import chase\nfrom rlbot.messages.flat.QuickChatSelection import QuickChatSelection\nfrom rlbot.utils.structures.game_data_struct import GameTickPacket\nfrom util.ball_prediction_analysis import find_slice_at_time\nfrom util.boost_pad_tracker import BoostPadTracker\nfrom util.drive import steer_toward_target\nfrom util.sequence import Sequence, ControlStep\nfrom util.vec import Vec3\nimport math\nimport time\nfrom math import radians\nkickoff_location = Vec3(0, 0, 0)\n\n\nclass MyBot(BaseAgent):\n\n def __init__(self, name, team, index):\n super().__init__(name, team, index)\n self.action: Action = kickoff\n self.info: GameInfo = None\n self.car: Car = None\n self.boost_pad_tracker = BoostPadTracker()\n self.stat: Strategy = None\n self.action: Action = None\n\n def initialize_agent(self):\n self.boost_pad_tracker.initialize_boosts(self.get_field_info())\n self.info = MyInfo(self.team, self.index)\n self.strat = Strategy(self.info)\n self.car = Car()\n\n def get_output(self, packet: GameTickPacket) ->SimpleControllerState:\n \"\"\"\n This function will be called by the framework many times per second. This is where you can\n see the motion of the ball, etc. and return controls to drive your car.\n \"\"\"\n self.boost_pad_tracker.update_boost_status(packet)\n self.car.updateCar(packet, self.index)\n self.info.read_packet(packet, self.get_ball_prediction_struct().slices)\n if self.action is None:\n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n print(controls.steer)\n return controls\n self.renderer.draw_string_3d(self.car.loc, 1, 1,\n f'Speed: {self.car.vel.length():.1f}', self.renderer.white())\n if self.action.name:\n self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1, \n 1, self.action.name, self.renderer.white())\n if packet.game_info.is_kickoff_pause and not isinstance(self.action,\n kickoff):\n self.action = kickoff(self.car.loc)\n controls = self.action.tick(self.info)\n return controls\n if self.action and not self.action.done:\n controls = self.action.tick(self.info)\n if controls is not None:\n return controls\n elif self.action.done:\n print('choosing new action')\n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n return controls\n ball_location = Vec3(packet.game_ball.physics.location)\n if self.car.loc.dist(ball_location) > 1500:\n ball_prediction = self.get_ball_prediction_struct()\n ball_in_future = find_slice_at_time(ball_prediction, packet.\n game_info.seconds_elapsed + 2)\n target_location = Vec3(ball_in_future.physics.location)\n self.renderer.draw_line_3d(ball_location, target_location, self\n .renderer.cyan())\n else:\n target_location = ball_location\n \"\"\"\n if 750 < self.car.vel.length() < 800:\n # We'll do a front flip if the car is moving at a certain speed.\n return self.begin_front_flip(packet)\n \n #controls = self.action.controls\n controls = SimpleControllerState()\n controls.steer = steer_toward_target(self.car, target_location)\n controls.throttle = 1.0\n # You can set more controls if you want, like controls.boost.\n \"\"\"\n print('the fuck we doin here?!?!?!?')\n return controls\n\n def begin_front_flip(self, 
packet):\n self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection\n .Information_IGotIt)\n self.action = Sequence([ControlStep(duration=0.05, controls=\n SimpleControllerState(jump=True)), ControlStep(duration=0.05,\n controls=SimpleControllerState(jump=False)), ControlStep(\n duration=0.2, controls=SimpleControllerState(jump=True, pitch=-\n 1)), ControlStep(duration=0.8, controls=SimpleControllerState())])\n return self.action.tick(packet)\n\n def is_kickoff(self, ball_location, ball_velocity):\n return ball_location.flat(\n ) == kickoff_location and ball_velocity.length() == 0\n\n\n<mask token>\n",
"step-5": "from rlbot.agents.base_agent import BaseAgent, GameTickPacket, SimpleControllerState\n#from rlbot.utils.structures.game_data_struct import GameTickPacket\nfrom Decisions.challengeGame import ChallengeGame\nfrom Decisions.info import MyInfo, Car\nfrom Decisions.strat import Strategy\nfrom Drawing.Drawing import DrawingTool\nfrom util.vec import Vec3\nfrom Actions.Kickoff import kickoff\nfrom Actions.Chase import chase\n# Blue team's (0) goal is located at (0, -5120) \n# Orange (1) at (0, 5120)\n# ball R = 92\n\nfrom rlbot.messages.flat.QuickChatSelection import QuickChatSelection\nfrom rlbot.utils.structures.game_data_struct import GameTickPacket\n\nfrom util.ball_prediction_analysis import find_slice_at_time\nfrom util.boost_pad_tracker import BoostPadTracker\nfrom util.drive import steer_toward_target\nfrom util.sequence import Sequence, ControlStep\nfrom util.vec import Vec3\n\n\nimport math\nimport time\nfrom math import radians\n\nkickoff_location = Vec3(0, 0, 0)\n\nclass MyBot(BaseAgent):\n\n\n def __init__(self, name, team, index):\n super().__init__(name, team, index) \n self.action: Action = kickoff\n self.info : GameInfo = None\n self.car : Car = None\n self.boost_pad_tracker = BoostPadTracker()\n self.stat : Strategy = None\n self.action : Action = None\n\n def initialize_agent(self):\n # Set up information about the boost pads now that the game is active and the info is available\n self.boost_pad_tracker.initialize_boosts(self.get_field_info())\n self.info = MyInfo(self.team, self.index)\n self.strat = Strategy(self.info)\n self.car = Car()\n\n def get_output(self, packet: GameTickPacket) -> SimpleControllerState:\n \"\"\"\n This function will be called by the framework many times per second. This is where you can\n see the motion of the ball, etc. and return controls to drive your car.\n \"\"\"\n\n # Keep our boost pad info updated with which pads are currently active\n self.boost_pad_tracker.update_boost_status(packet)\n #self.info = self.info.read_packet(packet) \n self.car.updateCar(packet, self.index)\n self.info.read_packet(packet, self.get_ball_prediction_struct().slices)\n #print(\"in main target: {}\".format(self.get_ball_prediction_struct().slices[0].physics.location))\n #self.renderer.draw_line_3d(self.car.loc, target_location, self.renderer.white())\n #self.renderer.draw_rect_3d(target_location, 8, 8, True, self.renderer.cyan(), centered=True)\n \n #cg = ChallengeGame(self.car, bp_struct)\n \n #print(cg.get_time_to_loc(cg.challenge_loc))\n # This is good to keep at the beginning of get_output. 
It will allow you to continue\n # any sequences that you may have started during a previous call to get_output.\n if self.action is None:\n self.action = self.strat.chooseAction(self.info)\n \n controls = self.action.tick(self.info)\n print(controls.steer)\n return controls\n \n\n self.renderer.draw_string_3d(self.car.loc, 1, 1, f'Speed: {self.car.vel.length():.1f}', self.renderer.white())\n if self.action.name:\n self.renderer.draw_string_3d(self.car.loc + Vec3(0, 0, 20), 1, 1, self.action.name, self.renderer.white())\n \n\n if packet.game_info.is_kickoff_pause and not isinstance(self.action, kickoff):\n #self.logger.info(self.action)\n self.action = kickoff(self.car.loc)\n #print(\"Sequence is: {}\".format(self.action))\n #print(\"Sequence finished: {}\".format(self.action.done))\n controls = self.action.tick(self.info)\n return controls\n \n if self.action and not self.action.done:\n controls = self.action.tick(self.info)\n #print(\"action is: {}\".format(self.action.name))\n if controls is not None:\n return controls\n \n elif self.action.done:\n print(\"choosing new action\")\n \n self.action = self.strat.chooseAction(self.info)\n controls = self.action.tick(self.info)\n return controls\n\n # Gather some information about our car and the ball\n ball_location = Vec3(packet.game_ball.physics.location)\n\n if self.car.loc.dist(ball_location) > 1500:\n # We're far away from the ball, let's try to lead it a little bit\n ball_prediction = self.get_ball_prediction_struct() # This can predict bounces, etc\n ball_in_future = find_slice_at_time(ball_prediction, packet.game_info.seconds_elapsed + 2)\n target_location = Vec3(ball_in_future.physics.location)\n self.renderer.draw_line_3d(ball_location, target_location, self.renderer.cyan())\n else:\n target_location = ball_location\n\n # Draw some things to help understand what the bot is thinking\n #self.renderer.draw_string_2d(100, 100, 1, 1, f'Ball at: {ball_location}', self.renderer.white())\n\n '''\n if 750 < self.car.vel.length() < 800:\n # We'll do a front flip if the car is moving at a certain speed.\n return self.begin_front_flip(packet)\n \n #controls = self.action.controls\n controls = SimpleControllerState()\n controls.steer = steer_toward_target(self.car, target_location)\n controls.throttle = 1.0\n # You can set more controls if you want, like controls.boost.\n '''\n print(\"the fuck we doin here?!?!?!?\")\n return controls\n\n def begin_front_flip(self, packet):\n # Send some quickchat just for fun\n self.send_quick_chat(team_only=False, quick_chat=QuickChatSelection.Information_IGotIt)\n\n # Do a front flip. 
We will be committed to this for a few seconds and the bot will ignore other\n # logic during that time because we are setting the action.\n self.action = Sequence([\n ControlStep(duration=0.05, controls=SimpleControllerState(jump=True)),\n ControlStep(duration=0.05, controls=SimpleControllerState(jump=False)),\n ControlStep(duration=0.2, controls=SimpleControllerState(jump=True, pitch=-1)),\n ControlStep(duration=0.8, controls=SimpleControllerState()),\n ])\n\n # Return the controls associated with the beginning of the sequence so we can start right away.\n return self.action.tick(packet)\n\n def is_kickoff(self, ball_location, ball_velocity):\n #self.logger.info(ball_location.flat() == kickoff_location)\n #self.logger.info(ball_velocity.length() == 0)\n return ball_location.flat() == kickoff_location and ball_velocity.length() == 0\n\n\n'''\nclass Bot(BaseAgent):\n DEVMODE = True\n\n def __init__(self, name, team, index):\n super().__init__(name, team, index)\n self.info: GameInfo = None\n self.draw: DrawingTool = None\n self.strat: Strategy = None\n self.car = None\n self.Actions: Maneuver = None\n self.controls: SimpleControllerState = SimpleControllerState()\n\n def initialize_agent(self):\n #self.logger.info(rlutilities.__file__)\n self.info = GameInfo(self.team)\n #for field in self.info._fields_:\n # print(field[0], getattr(self.info, field[0]))\n self.info.set_mode(\"soccar\")\n self.draw = DrawingTool(self.renderer)\n self.car = self.info.cars[self.index]\n self.logger.info(\"my index is {}\".format(self.index))\n self.strat = Strategy(self.info, my_car)\n\n def get_output(self, packet: GameTickPacket):\n # Update game data variables\n \n if self.tick_counter < 20:\n self.tick_counter += 1\n return Input()\n \n\n if self.Actions is None and not self.Actions.finished:\n controls = self.Action.tick(packet)\n\n self.info.read_packet(packet, self.get_field_info(), self.get_ball_path())\n\n self.draw.draw_path(self.get_ball_path())\n challenge = ChallengeGame(self.info.cars[self.index], self.info.ball_path)\n\n if challenge.should_go:\n self.Action = self.strat.chooseAction(challenge, self.info.ball_path)\n self.controls = self.Action.controls\n print(self.Action)\n \n if self.info.is_kickoff():\n return self.do\n self.controls = self.action.doThing(self.info)\n \n if self.DEVMODE:\n self.Action.render(self.draw)\n challenge.render(self.draw)\n\n return self.controls\n\n \n def get_ball_path(self):\n ball_prediction = self.get_ball_prediction_struct()\n path = []\n for i in range(0, ball_prediction.num_slices):\n prediction_slice = ball_prediction.slices[i]\n loc = prediction_slice.physics.location\n path.append(loc)\n return path\n\n'''",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#!/usr/bin/env python
##############
#### Your name: Alexis Vincent
##############
import numpy as np
import re
from skimage.color import convert_colorspace
from sklearn.model_selection import GridSearchCV
from sklearn import svm, metrics
from skimage import io, feature, filters, exposure, color
from skimage.feature import hog
import matplotlib.pyplot as plt
class ImageClassifier:
def __init__(self):
self.classifer = None
def imread_convert(self, f):
return io.imread(f).astype(np.uint8)
def load_data_from_folder(self, dir):
# read all images into an image collection
ic = io.ImageCollection(dir + "*.jpg", load_func=self.imread_convert)
# create one large array of image data
data = io.concatenate_images(ic)
# extract labels from image names
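        # e.g. './train/cat_07.jpg' -> label 'cat': the label is the filename text before the first '_'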
labels = np.array(ic.files)
for i, f in enumerate(labels):
m = re.search("_", f)
labels[i] = f[len(dir):m.start()]
return (data, labels)
def extract_image_features(self, data):
# Please do not modify the header above
# extract feature vector from image data
fd = None
for pic in data:
#grey_picture = color.rgb2gray(pic)
#gaussian_picture = filters.gaussian(pic, 1)
rescaled_picture = exposure.rescale_intensity(pic)
feature_data = hog(rescaled_picture,
orientations=11,
#pixels_per_cell=(32, 32),
pixels_per_cell=(20, 20),
cells_per_block=(6, 6),
# transform_sqrt=True,
feature_vector=True,
block_norm='L2-Hys')
# self.print_hog_pics(color.rgb2gray(gaussian_picture))
if fd is None:
fd = feature_data.reshape(1, feature_data.shape[0])
else:
fd = np.concatenate([fd, feature_data.reshape(1, feature_data.shape[0])])
# Please do not modify the return type below
return fd
def train_classifier(self, train_data, train_labels):
# Please do not modify the header above
# train model and save the trained model to self.classifier
clf = svm.SVC(C=1, gamma=0.001, kernel='linear')
self.classifer = clf.fit(train_data, train_labels)
def predict_labels(self, data):
# Please do not modify the header
# predict labels of test data using trained model in self.classifier
# the code below expects output to be stored in predicted_labels
predicted_labels = self.classifer.predict(data)
# Please do not modify the return type below
return predicted_labels
def print_hog_pics(self, image):
#orientations=8, pixels_per_cell=(16, 16) cells_per_block=(1, 1), visualise=True
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualise=True)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all', sharey='all')
ax1.axis('off')
ax1.imshow(image)
ax1.set_title('Input image')
ax1.set_adjustable('box-forced')
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))
ax2.axis('off')
ax2.imshow(hog_image_rescaled)
ax2.set_title('Histogram of Oriented Gradients')
        ax2.set_adjustable('box-forced')
plt.show()
def main():
img_clf = ImageClassifier()
# load images
(train_raw, train_labels) = img_clf.load_data_from_folder('./train/')
(test_raw, test_labels) = img_clf.load_data_from_folder('./test/')
# convert images into features
train_data = img_clf.extract_image_features(train_raw)
test_data = img_clf.extract_image_features(test_raw)
# train model and test on training data
img_clf.train_classifier(train_data, train_labels)
predicted_labels = img_clf.predict_labels(train_data)
print("\nTraining results")
print("=============================")
print("Confusion Matrix:\n", metrics.confusion_matrix(train_labels, predicted_labels))
print("Accuracy: ", metrics.accuracy_score(train_labels, predicted_labels))
print("F1 score: ", metrics.f1_score(train_labels, predicted_labels, average='micro'))
print(predicted_labels)
# test model
predicted_labels = img_clf.predict_labels(test_data)
print("\nTesting results")
print("=============================")
print("Confusion Matrix:\n", metrics.confusion_matrix(test_labels, predicted_labels))
print("Accuracy: ", metrics.accuracy_score(test_labels, predicted_labels))
print("F1 score: ", metrics.f1_score(test_labels, predicted_labels, average='micro'))
print(predicted_labels)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "58204b4b035aa06015def7529852e882ffdd369a",
"index": 8997,
"step-1": "<mask token>\n\n\nclass ImageClassifier:\n <mask token>\n <mask token>\n <mask token>\n\n def extract_image_features(self, data):\n fd = None\n for pic in data:\n rescaled_picture = exposure.rescale_intensity(pic)\n feature_data = hog(rescaled_picture, orientations=11,\n pixels_per_cell=(20, 20), cells_per_block=(6, 6),\n feature_vector=True, block_norm='L2-Hys')\n if fd is None:\n fd = feature_data.reshape(1, feature_data.shape[0])\n else:\n fd = np.concatenate([fd, feature_data.reshape(1,\n feature_data.shape[0])])\n return fd\n\n def train_classifier(self, train_data, train_labels):\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\n self.classifer = clf.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def print_hog_pics(self, image):\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualise=True)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all',\n sharey='all')\n ax1.axis('off')\n ax1.imshow(image)\n ax1.set_title('Input image')\n ax1.set_adjustable('box-forced')\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range\n =(0, 10))\n ax2.axis('off')\n ax2.imshow(hog_image_rescaled)\n ax2.set_title('Histogram of Oriented Gradients')\n ax1.set_adjustable('box-forced')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ImageClassifier:\n\n def __init__(self):\n self.classifer = None\n <mask token>\n\n def load_data_from_folder(self, dir):\n ic = io.ImageCollection(dir + '*.jpg', load_func=self.imread_convert)\n data = io.concatenate_images(ic)\n labels = np.array(ic.files)\n for i, f in enumerate(labels):\n m = re.search('_', f)\n labels[i] = f[len(dir):m.start()]\n return data, labels\n\n def extract_image_features(self, data):\n fd = None\n for pic in data:\n rescaled_picture = exposure.rescale_intensity(pic)\n feature_data = hog(rescaled_picture, orientations=11,\n pixels_per_cell=(20, 20), cells_per_block=(6, 6),\n feature_vector=True, block_norm='L2-Hys')\n if fd is None:\n fd = feature_data.reshape(1, feature_data.shape[0])\n else:\n fd = np.concatenate([fd, feature_data.reshape(1,\n feature_data.shape[0])])\n return fd\n\n def train_classifier(self, train_data, train_labels):\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\n self.classifer = clf.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def print_hog_pics(self, image):\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualise=True)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all',\n sharey='all')\n ax1.axis('off')\n ax1.imshow(image)\n ax1.set_title('Input image')\n ax1.set_adjustable('box-forced')\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range\n =(0, 10))\n ax2.axis('off')\n ax2.imshow(hog_image_rescaled)\n ax2.set_title('Histogram of Oriented Gradients')\n ax1.set_adjustable('box-forced')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ImageClassifier:\n\n def __init__(self):\n self.classifer = None\n\n def imread_convert(self, f):\n return io.imread(f).astype(np.uint8)\n\n def load_data_from_folder(self, dir):\n ic = io.ImageCollection(dir + '*.jpg', load_func=self.imread_convert)\n data = io.concatenate_images(ic)\n labels = np.array(ic.files)\n for i, f in enumerate(labels):\n m = re.search('_', f)\n labels[i] = f[len(dir):m.start()]\n return data, labels\n\n def extract_image_features(self, data):\n fd = None\n for pic in data:\n rescaled_picture = exposure.rescale_intensity(pic)\n feature_data = hog(rescaled_picture, orientations=11,\n pixels_per_cell=(20, 20), cells_per_block=(6, 6),\n feature_vector=True, block_norm='L2-Hys')\n if fd is None:\n fd = feature_data.reshape(1, feature_data.shape[0])\n else:\n fd = np.concatenate([fd, feature_data.reshape(1,\n feature_data.shape[0])])\n return fd\n\n def train_classifier(self, train_data, train_labels):\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\n self.classifer = clf.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def print_hog_pics(self, image):\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualise=True)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all',\n sharey='all')\n ax1.axis('off')\n ax1.imshow(image)\n ax1.set_title('Input image')\n ax1.set_adjustable('box-forced')\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range\n =(0, 10))\n ax2.axis('off')\n ax2.imshow(hog_image_rescaled)\n ax2.set_title('Histogram of Oriented Gradients')\n ax1.set_adjustable('box-forced')\n plt.show()\n\n\ndef main():\n img_clf = ImageClassifier()\n train_raw, train_labels = img_clf.load_data_from_folder('./train/')\n test_raw, test_labels = img_clf.load_data_from_folder('./test/')\n train_data = img_clf.extract_image_features(train_raw)\n test_data = img_clf.extract_image_features(test_raw)\n img_clf.train_classifier(train_data, train_labels)\n predicted_labels = img_clf.predict_labels(train_data)\n print('\\nTraining results')\n print('=============================')\n print('Confusion Matrix:\\n', metrics.confusion_matrix(train_labels,\n predicted_labels))\n print('Accuracy: ', metrics.accuracy_score(train_labels, predicted_labels))\n print('F1 score: ', metrics.f1_score(train_labels, predicted_labels,\n average='micro'))\n print(predicted_labels)\n predicted_labels = img_clf.predict_labels(test_data)\n print('\\nTesting results')\n print('=============================')\n print('Confusion Matrix:\\n', metrics.confusion_matrix(test_labels,\n predicted_labels))\n print('Accuracy: ', metrics.accuracy_score(test_labels, predicted_labels))\n print('F1 score: ', metrics.f1_score(test_labels, predicted_labels,\n average='micro'))\n print(predicted_labels)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ImageClassifier:\n\n def __init__(self):\n self.classifer = None\n\n def imread_convert(self, f):\n return io.imread(f).astype(np.uint8)\n\n def load_data_from_folder(self, dir):\n ic = io.ImageCollection(dir + '*.jpg', load_func=self.imread_convert)\n data = io.concatenate_images(ic)\n labels = np.array(ic.files)\n for i, f in enumerate(labels):\n m = re.search('_', f)\n labels[i] = f[len(dir):m.start()]\n return data, labels\n\n def extract_image_features(self, data):\n fd = None\n for pic in data:\n rescaled_picture = exposure.rescale_intensity(pic)\n feature_data = hog(rescaled_picture, orientations=11,\n pixels_per_cell=(20, 20), cells_per_block=(6, 6),\n feature_vector=True, block_norm='L2-Hys')\n if fd is None:\n fd = feature_data.reshape(1, feature_data.shape[0])\n else:\n fd = np.concatenate([fd, feature_data.reshape(1,\n feature_data.shape[0])])\n return fd\n\n def train_classifier(self, train_data, train_labels):\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\n self.classifer = clf.fit(train_data, train_labels)\n\n def predict_labels(self, data):\n predicted_labels = self.classifer.predict(data)\n return predicted_labels\n\n def print_hog_pics(self, image):\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualise=True)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all',\n sharey='all')\n ax1.axis('off')\n ax1.imshow(image)\n ax1.set_title('Input image')\n ax1.set_adjustable('box-forced')\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range\n =(0, 10))\n ax2.axis('off')\n ax2.imshow(hog_image_rescaled)\n ax2.set_title('Histogram of Oriented Gradients')\n ax1.set_adjustable('box-forced')\n plt.show()\n\n\ndef main():\n img_clf = ImageClassifier()\n train_raw, train_labels = img_clf.load_data_from_folder('./train/')\n test_raw, test_labels = img_clf.load_data_from_folder('./test/')\n train_data = img_clf.extract_image_features(train_raw)\n test_data = img_clf.extract_image_features(test_raw)\n img_clf.train_classifier(train_data, train_labels)\n predicted_labels = img_clf.predict_labels(train_data)\n print('\\nTraining results')\n print('=============================')\n print('Confusion Matrix:\\n', metrics.confusion_matrix(train_labels,\n predicted_labels))\n print('Accuracy: ', metrics.accuracy_score(train_labels, predicted_labels))\n print('F1 score: ', metrics.f1_score(train_labels, predicted_labels,\n average='micro'))\n print(predicted_labels)\n predicted_labels = img_clf.predict_labels(test_data)\n print('\\nTesting results')\n print('=============================')\n print('Confusion Matrix:\\n', metrics.confusion_matrix(test_labels,\n predicted_labels))\n print('Accuracy: ', metrics.accuracy_score(test_labels, predicted_labels))\n print('F1 score: ', metrics.f1_score(test_labels, predicted_labels,\n average='micro'))\n print(predicted_labels)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\r\n\r\n\r\n\r\n##############\r\n\r\n#### Your name: Alexis Vincent\r\n\r\n##############\r\n\r\n\r\n\r\nimport numpy as np\r\n\r\nimport re\r\n\r\nfrom skimage.color import convert_colorspace\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\nfrom sklearn import svm, metrics\r\n\r\nfrom skimage import io, feature, filters, exposure, color\r\n\r\nfrom skimage.feature import hog\r\n\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\n\r\nclass ImageClassifier:\r\n\r\n def __init__(self):\r\n\r\n self.classifer = None\r\n\r\n\r\n\r\n def imread_convert(self, f):\r\n\r\n return io.imread(f).astype(np.uint8)\r\n\r\n\r\n\r\n def load_data_from_folder(self, dir):\r\n\r\n # read all images into an image collection\r\n\r\n ic = io.ImageCollection(dir + \"*.jpg\", load_func=self.imread_convert)\r\n\r\n\r\n\r\n # create one large array of image data\r\n\r\n data = io.concatenate_images(ic)\r\n\r\n # extract labels from image names\r\n\r\n labels = np.array(ic.files)\r\n\r\n for i, f in enumerate(labels):\r\n\r\n m = re.search(\"_\", f)\r\n\r\n labels[i] = f[len(dir):m.start()]\r\n\r\n\r\n\r\n return (data, labels)\r\n\r\n\r\n\r\n def extract_image_features(self, data):\r\n\r\n # Please do not modify the header above\r\n\r\n # extract feature vector from image data\r\n\r\n fd = None\r\n\r\n for pic in data:\r\n\r\n #grey_picture = color.rgb2gray(pic)\r\n\r\n #gaussian_picture = filters.gaussian(pic, 1)\r\n\r\n rescaled_picture = exposure.rescale_intensity(pic)\r\n\r\n\r\n\r\n feature_data = hog(rescaled_picture,\r\n\r\n orientations=11,\r\n\r\n #pixels_per_cell=(32, 32),\r\n pixels_per_cell=(20, 20),\r\n cells_per_block=(6, 6),\r\n\r\n # transform_sqrt=True,\r\n\r\n feature_vector=True,\r\n\r\n block_norm='L2-Hys')\r\n\r\n # self.print_hog_pics(color.rgb2gray(gaussian_picture))\r\n\r\n if fd is None:\r\n\r\n fd = feature_data.reshape(1, feature_data.shape[0])\r\n\r\n else:\r\n\r\n fd = np.concatenate([fd, feature_data.reshape(1, feature_data.shape[0])])\r\n\r\n # Please do not modify the return type below\r\n\r\n return fd\r\n\r\n\r\n\r\n def train_classifier(self, train_data, train_labels):\r\n\r\n # Please do not modify the header above\r\n\r\n # train model and save the trained model to self.classifier\r\n\r\n clf = svm.SVC(C=1, gamma=0.001, kernel='linear')\r\n\r\n self.classifer = clf.fit(train_data, train_labels)\r\n\r\n\r\n\r\n def predict_labels(self, data):\r\n\r\n # Please do not modify the header\r\n\r\n # predict labels of test data using trained model in self.classifier\r\n\r\n # the code below expects output to be stored in predicted_labels\r\n\r\n predicted_labels = self.classifer.predict(data)\r\n\r\n # Please do not modify the return type below\r\n\r\n return predicted_labels\r\n\r\n\r\n\r\n def print_hog_pics(self, image):\r\n #orientations=8, pixels_per_cell=(16, 16) cells_per_block=(1, 1), visualise=True\r\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\r\n\r\n cells_per_block=(1, 1), visualise=True)\r\n\r\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex='all', sharey='all')\r\n\r\n\r\n\r\n ax1.axis('off')\r\n\r\n ax1.imshow(image)\r\n\r\n ax1.set_title('Input image')\r\n\r\n ax1.set_adjustable('box-forced')\r\n\r\n\r\n\r\n # Rescale histogram for better display\r\n\r\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))\r\n\r\n\r\n\r\n ax2.axis('off')\r\n\r\n ax2.imshow(hog_image_rescaled)\r\n\r\n ax2.set_title('Histogram of Oriented Gradients')\r\n\r\n 
ax1.set_adjustable('box-forced')\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n\r\n img_clf = ImageClassifier()\r\n\r\n\r\n\r\n # load images\r\n\r\n (train_raw, train_labels) = img_clf.load_data_from_folder('./train/')\r\n\r\n (test_raw, test_labels) = img_clf.load_data_from_folder('./test/')\r\n\r\n\r\n\r\n # convert images into features\r\n\r\n train_data = img_clf.extract_image_features(train_raw)\r\n\r\n test_data = img_clf.extract_image_features(test_raw)\r\n\r\n\r\n\r\n # train model and test on training data\r\n\r\n img_clf.train_classifier(train_data, train_labels)\r\n\r\n\r\n\r\n predicted_labels = img_clf.predict_labels(train_data)\r\n\r\n print(\"\\nTraining results\")\r\n\r\n print(\"=============================\")\r\n\r\n print(\"Confusion Matrix:\\n\", metrics.confusion_matrix(train_labels, predicted_labels))\r\n\r\n print(\"Accuracy: \", metrics.accuracy_score(train_labels, predicted_labels))\r\n\r\n print(\"F1 score: \", metrics.f1_score(train_labels, predicted_labels, average='micro'))\r\n print(predicted_labels)\r\n\r\n\r\n\r\n # test model\r\n\r\n predicted_labels = img_clf.predict_labels(test_data)\r\n\r\n print(\"\\nTesting results\")\r\n\r\n print(\"=============================\")\r\n\r\n print(\"Confusion Matrix:\\n\", metrics.confusion_matrix(test_labels, predicted_labels))\r\n\r\n print(\"Accuracy: \", metrics.accuracy_score(test_labels, predicted_labels))\r\n\r\n print(\"F1 score: \", metrics.f1_score(test_labels, predicted_labels, average='micro'))\r\n print(predicted_labels)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n main()",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
from tkinter import *
class Menuutje:
def __init__(self, master):
menu = Menu(master)
master.config(menu=menu)
subMenu = Menu(menu)
menu.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="New Game...", command=self.doNothing)
subMenu.add_command(label="New...", command=self.doNothing)
subMenu.add_separator()
subMenu.add_command(label="Exit", command=self.doNothing)
editMenu = Menu(menu)
menu.add_cascade(label="Edit", menu=editMenu)
editMenu.add_command(label="Redo", command=self.doNothing)
def doNothing(self):
print("Okay I do nothing..")
class MenuGameRPS:
def __init__(self, master):
menu = Menu(master)
master.config(menu=menu)
subMenu = Menu(menu)
menu.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="New Game...", command=self.newGame)
subMenu.add_separator()
subMenu.add_command(label="Exit", command=self.exitGame)
def exitGame(self):
exit()
    def newGame(self):
        # The original snippet ends with an empty body here; `pass` is a minimal placeholder so the class parses.
        pass
|
normal
|
{
"blob_id": "8fbfa53be826b45b53b530a1766f6a68c61f5be9",
"index": 9377,
"step-1": "from tkinter import *\n\n\nclass Menuutje:\n\n def __init__(self, master):\n menu = Menu(master)\n master.config(menu=menu)\n\n subMenu = Menu(menu)\n menu.add_cascade(label=\"File\", menu=subMenu)\n subMenu.add_command(label=\"New Game...\", command=self.doNothing)\n subMenu.add_command(label=\"New...\", command=self.doNothing)\n subMenu.add_separator()\n subMenu.add_command(label=\"Exit\", command=self.doNothing)\n\n editMenu = Menu(menu)\n menu.add_cascade(label=\"Edit\", menu=editMenu)\n editMenu.add_command(label=\"Redo\", command=self.doNothing)\n\n\n def doNothing(self):\n print(\"Okay I do nothing..\")\n\n\nclass MenuGameRPS:\n def __init__(self, master):\n menu = Menu(master)\n master.config(menu=menu)\n\n subMenu = Menu(menu)\n menu.add_cascade(label=\"File\", menu=subMenu)\n subMenu.add_command(label=\"New Game...\", command=self.newGame)\n subMenu.add_separator()\n subMenu.add_command(label=\"Exit\", command=self.exitGame)\n\n\n def exitGame(self):\n exit()\n\n\n def newGame(self):\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import sys
sys.path.append('../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generic_utils import save_set_of_images
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
from copy import deepcopy
from my_utils.photometric_optimization.models import FLAME
from my_utils.photometric_optimization import util
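# ge_gen_in (below) selects the generator's conditioning input: textured render and/or normal map
# when those conditions are enabled, otherwise the raw FLAME parameter vector.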
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
def corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma, pose_sigma):
# import ipdb; ipdb.set_trace()
# np.random.seed(2)
corrupted_flame = deepcopy(flm_params)
if corruption_type == 'shape' or corruption_type == 'all':
corrupted_flame[:, :10] = flm_params[:, :10] + \
np.clip(np.random.normal(0, sigma, flm_params[:, :10].shape),
-3 * sigma, 3 * sigma).astype('float32')
if corruption_type == 'exp_jaw'or corruption_type == 'all':
# Expression
corrupted_flame[:, 100:110] = flm_params[:, 100:110] + \
np.clip(np.random.normal(0, sigma, flm_params[:, 100:110].shape),
-3 * sigma, 3 * sigma).astype('float32')
# Jaw pose
corrupted_flame[:, 153] = flm_params[:, 153] + \
np.random.normal(0, jaw_sigma, corrupted_flame.shape[0])
if corruption_type == 'pose' or corruption_type == 'all':
# pose_perturbation = np.random.normal(0, pose_sigma[i], (corrupted_flame.shape[0], 3))
# corrupted_flame[:, 150:153] += np.clip(pose_perturbation, -3 * pose_sigma[i], 3 * pose_sigma[i])
pose_perturbation = np.random.normal(0, pose_sigma, (corrupted_flame.shape[0],))
corrupted_flame[:, 151] = flm_params[:, 151] + \
np.clip(pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)
return corrupted_flame
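# Illustrative usage (not invoked in this script): a perturbed batch could be produced with e.g.
#   corrupted = corrupt_flame_given_sigma(flm_params, 'all', corruption_sigma[i], jaw_rot_sigmas[i], pose_sigmas[i])
# Below, the clean flm_params are rendered directly.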
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
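# step_max = log2(resolution) - 2, i.e. 6 for 256x256; used as the generator's `step` argument below.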
num_smpl_to_eval_on = 128
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
# Uncomment the appropriate run_id
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
# run_ids_1 = [7]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
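# 236-dim FLAME code layout: 100 shape + 50 expression + 6 pose + 3 camera + 50 texture + 27 (9x3) SH lighting params.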
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
np.random.seed(2)
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
shape_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(97))).astype('float32')
exp_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(47))).astype('float32')
# +- pi/4 for bad samples +- pi/8 for good samples
# pose = np.array([0, np.random.uniform(-np.pi/4, np.pi/4, 1), 0,
# np.random.uniform(0, np.pi/12, 1), 0, 0]).astype('float32')
pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0,
np.random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')
texture = np.random.normal(0, 1, [50]).astype('float32')
# texture = flame_param['tex']
flame_param = np.hstack((shape_params, exp_params, pose, flame_param['cam'],
texture, flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 32
num_sigmas = 1
corruption_sigma = np.linspace(0, 1.5, num_sigmas)
jaw_rot_range = (0, np.pi/8)
jaw_rot_sigmas = np.linspace(0, (jaw_rot_range[1] - jaw_rot_range[0])/6, num_sigmas)
pose_range = (-np.pi/3, np.pi/3)
pose_sigmas = np.linspace(0, (pose_range[1] - pose_range[0])/6, num_sigmas)
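# The /6 factor chooses sigma so that +/-3*sigma spans the full range (the clip bounds used for the
# shape/exp/pose perturbations above); with num_sigmas=1 each grid is just [0.].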
config_obj = util.dict2obj(cnst.flame_config)
flame_decoder = FLAME.FLAME(config_obj).cuda().eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [], 'light_code': [], 'texture_code': [],
'identity_indices': []}
for i, sigma in enumerate(corruption_sigma):
images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
# print(flm_params[1, :])
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
# flm_batch = eye_cntr_reg.substitute_flame_batch_with_regressed_camera(flm_batch)
flm_batch = position_to_given_location(flame_decoder, flm_batch)
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
batch_size_true = flm_batch.shape[0]
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
params_to_save['cam'].append(cam.cpu().detach().numpy())
                params_to_save['shape'].append(shape.cpu().detach().numpy())
params_to_save['exp'].append(exp.cpu().detach().numpy())
params_to_save['pose'].append(pose.cpu().detach().numpy())
params_to_save['light_code'].append(light_code.cpu().detach().numpy())
params_to_save['texture_code'].append(texture_code.cpu().detach().numpy())
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
# import ipdb; ipdb.set_trace()
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
# Render the 2nd time to get backface culling and white texture
# norma_map_img_to_save, _, _, _, rend_flm_to_save = \
# overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
# camera_params=cam, cull_backfaces=True, constant_albedo=0.6)
                # Back face culling temporarily unavailable
norma_map_img_to_save, _, _, _, rend_flm_to_save = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam, cull_backfaces=False, constant_albedo=0.6)
rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1
# rend_flm_to_save = rend_flm
# norma_map_img_to_save = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm_to_save = fast_image_reshape(rend_flm_to_save, height_out=256, width_out=256, mode='bilinear')
# norma_map_img_to_save = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
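            # one random identity-embedding index per sample, drawn from the generator's 69158-entry embedding vocabulary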
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
params_to_save['identity_indices'].append(identity_embeddings.cpu().detach().numpy())
# import ipdb; ipdb.set_trace()
images[batch_idx:batch_idx+batch_size_true] = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
# if flame_mesh_imgs is None:
flame_mesh_imgs[batch_idx:batch_idx+batch_size_true] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()
if save_images:
mdl_name = settings_for_runs[run_idx]['name']
for key in params_to_save.keys():
params_to_save[key] = np.concatenate(params_to_save[key], axis=0)
save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx), f'random_samples_q_eval_{mdl_name}')
os.makedirs(save_dir, exist_ok=True)
np.save(os.path.join(save_dir, 'params.npy'), params_to_save)
save_path_current_id = os.path.join(save_dir, 'images')
save_set_of_images(path=save_path_current_id, prefix='', images=(images + 1) / 2, show_prog_bar=True)
            # save FLAME render (mesh) conditions
save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions')
save_set_of_images(path=save_path_current_id_flm_rndr, prefix='mesh', images=(flame_mesh_imgs + 1) / 2,
show_prog_bar=True)
|
normal
|
{
"blob_id": "d0991d8ea47379a0c1de836b5d215c99166ad049",
"index": 5936,
"step-1": "<mask token>\n\n\ndef ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond,\n texture_cond):\n if normal_map_cond and texture_cond:\n return torch.cat((textured_rndr, norm_map), dim=1)\n elif normal_map_cond:\n return norm_map\n elif texture_cond:\n return textured_rndr\n else:\n return flm_params\n\n\ndef corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma,\n pose_sigma):\n corrupted_flame = deepcopy(flm_params)\n if corruption_type == 'shape' or corruption_type == 'all':\n corrupted_flame[:, :10] = flm_params[:, :10] + np.clip(np.random.\n normal(0, sigma, flm_params[:, :10].shape), -3 * sigma, 3 * sigma\n ).astype('float32')\n if corruption_type == 'exp_jaw' or corruption_type == 'all':\n corrupted_flame[:, 100:110] = flm_params[:, 100:110] + np.clip(np.\n random.normal(0, sigma, flm_params[:, 100:110].shape), -3 *\n sigma, 3 * sigma).astype('float32')\n corrupted_flame[:, 153] = flm_params[:, 153] + np.random.normal(0,\n jaw_sigma, corrupted_flame.shape[0])\n if corruption_type == 'pose' or corruption_type == 'all':\n pose_perturbation = np.random.normal(0, pose_sigma, (\n corrupted_flame.shape[0],))\n corrupted_flame[:, 151] = flm_params[:, 151] + np.clip(\n pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)\n return corrupted_flame\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('../')\n<mask token>\n\n\ndef ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond,\n texture_cond):\n if normal_map_cond and texture_cond:\n return torch.cat((textured_rndr, norm_map), dim=1)\n elif normal_map_cond:\n return norm_map\n elif texture_cond:\n return textured_rndr\n else:\n return flm_params\n\n\ndef corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma,\n pose_sigma):\n corrupted_flame = deepcopy(flm_params)\n if corruption_type == 'shape' or corruption_type == 'all':\n corrupted_flame[:, :10] = flm_params[:, :10] + np.clip(np.random.\n normal(0, sigma, flm_params[:, :10].shape), -3 * sigma, 3 * sigma\n ).astype('float32')\n if corruption_type == 'exp_jaw' or corruption_type == 'all':\n corrupted_flame[:, 100:110] = flm_params[:, 100:110] + np.clip(np.\n random.normal(0, sigma, flm_params[:, 100:110].shape), -3 *\n sigma, 3 * sigma).astype('float32')\n corrupted_flame[:, 153] = flm_params[:, 153] + np.random.normal(0,\n jaw_sigma, corrupted_flame.shape[0])\n if corruption_type == 'pose' or corruption_type == 'all':\n pose_perturbation = np.random.normal(0, pose_sigma, (\n corrupted_flame.shape[0],))\n corrupted_flame[:, 151] = flm_params[:, 151] + np.clip(\n pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)\n return corrupted_flame\n\n\n<mask token>\nnp.random.seed(2)\nfor i, key in enumerate(fl_param_dict):\n flame_param = fl_param_dict[key]\n shape_params = np.concatenate((np.random.normal(0, 1, [3]), np.zeros(97))\n ).astype('float32')\n exp_params = np.concatenate((np.random.normal(0, 1, [3]), np.zeros(47))\n ).astype('float32')\n pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0, np.\n random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')\n texture = np.random.normal(0, 1, [50]).astype('float32')\n flame_param = np.hstack((shape_params, exp_params, pose, flame_param[\n 'cam'], texture, flame_param['lit'].flatten()))\n flm_params[i, :] = flame_param.astype('float32')\n if i == num_smpl_to_eval_on - 1:\n break\n<mask token>\nfor run_idx in run_ids_1:\n generator_1 = torch.nn.DataParallel(StyledGenerator(\n embedding_vocab_size=69158, rendered_flame_ascondition=\n settings_for_runs[run_idx]['rendered_flame_as_condition'],\n normal_maps_as_cond=settings_for_runs[run_idx][\n 'normal_maps_as_cond'], apply_sqrt2_fac_in_eq_lin=settings_for_runs\n [run_idx]['apply_sqrt2_fac_in_eq_lin'], core_tensor_res=\n core_tensor_res, w_truncation_factor=1.0, n_mlp=8)).cuda()\n model_idx = settings_for_runs[run_idx]['model_idx']\n ckpt1 = torch.load(\n f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')\n generator_1.load_state_dict(ckpt1['generator_running'])\n generator_1 = generator_1.eval()\n params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [],\n 'light_code': [], 'texture_code': [], 'identity_indices': []}\n for i, sigma in enumerate(corruption_sigma):\n images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)\n ).astype('float32')\n flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution,\n resolution)).astype('float32')\n pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))\n pbar.set_description('Generating_images')\n for batch_idx in pbar:\n flm_batch = flm_params[batch_idx:batch_idx + batch_size, :]\n flm_batch = torch.from_numpy(flm_batch).cuda()\n flm_batch = position_to_given_location(flame_decoder, flm_batch)\n if settings_for_runs[run_idx]['normal_maps_as_cond'\n ] or settings_for_runs[run_idx]['rendered_flame_as_condition']:\n 
batch_size_true = flm_batch.shape[0]\n cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.\n DECA_IDX['cam'][1]]\n shape = flm_batch[:, constants.INDICES['SHAPE'][0]:\n constants.INDICES['SHAPE'][1]]\n exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.\n INDICES['EXP'][1]]\n pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.\n INDICES['POSE'][1]]\n light_code = flm_batch[:, constants.DECA_IDX['lit'][0]:\n constants.DECA_IDX['lit'][1]].view((batch_size_true, 9, 3))\n texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:\n constants.DECA_IDX['tex'][1]]\n params_to_save['cam'].append(cam.cpu().detach().numpy())\n params_to_save['shape'].append(shape.cpu().detach().numpy())\n params_to_save['shape'].append(shape.cpu().detach().numpy())\n params_to_save['exp'].append(exp.cpu().detach().numpy())\n params_to_save['pose'].append(pose.cpu().detach().numpy())\n params_to_save['light_code'].append(light_code.cpu().detach\n ().numpy())\n params_to_save['texture_code'].append(texture_code.cpu().\n detach().numpy())\n norma_map_img, _, _, _, rend_flm = (overlay_visualizer.\n get_rendered_mesh(flame_params=(shape, exp, pose,\n light_code, texture_code), camera_params=cam))\n rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1\n norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1\n rend_flm = fast_image_reshape(rend_flm, height_out=256,\n width_out=256, mode='bilinear')\n norma_map_img = fast_image_reshape(norma_map_img,\n height_out=256, width_out=256, mode='bilinear')\n norma_map_img_to_save, _, _, _, rend_flm_to_save = (\n overlay_visualizer.get_rendered_mesh(flame_params=(\n shape, exp, pose, light_code, texture_code),\n camera_params=cam, cull_backfaces=False,\n constant_albedo=0.6))\n rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1\n rend_flm_to_save = fast_image_reshape(rend_flm_to_save,\n height_out=256, width_out=256, mode='bilinear')\n else:\n rend_flm = None\n norma_map_img = None\n gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img,\n settings_for_runs[run_idx]['normal_maps_as_cond'],\n settings_for_runs[run_idx]['rendered_flame_as_condition'])\n identity_embeddings = torch.randint(low=0, high=69158, size=(\n gen_1_in.shape[0],), dtype=torch.long, device='cuda')\n mdl_1_gen_images = generic_utils.get_images_from_flame_params(\n flame_params=gen_1_in.cpu().numpy(), pose=None, model=\n generator_1, step=step_max, alpha=alpha, input_indices=\n identity_embeddings.cpu().numpy())\n params_to_save['identity_indices'].append(identity_embeddings.\n cpu().detach().numpy())\n images[batch_idx:batch_idx + batch_size_true] = torch.clamp(\n mdl_1_gen_images, -1, 1).cpu().numpy()\n flame_mesh_imgs[batch_idx:batch_idx + batch_size_true\n ] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()\n if save_images:\n mdl_name = settings_for_runs[run_idx]['name']\n for key in params_to_save.keys():\n params_to_save[key] = np.concatenate(params_to_save[key],\n axis=0)\n save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx\n ), f'random_samples_q_eval_{mdl_name}')\n os.makedirs(save_dir, exist_ok=True)\n np.save(os.path.join(save_dir, 'params.npy'), params_to_save)\n save_path_current_id = os.path.join(save_dir, 'images')\n save_set_of_images(path=save_path_current_id, prefix='', images\n =(images + 1) / 2, show_prog_bar=True)\n save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions'\n )\n save_set_of_images(path=save_path_current_id_flm_rndr, prefix=\n 'mesh', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)\n",
"step-3": "<mask token>\nsys.path.append('../')\n<mask token>\nos.environ['PYTHONHASHSEED'] = '2'\n<mask token>\n\n\ndef ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond,\n texture_cond):\n if normal_map_cond and texture_cond:\n return torch.cat((textured_rndr, norm_map), dim=1)\n elif normal_map_cond:\n return norm_map\n elif texture_cond:\n return textured_rndr\n else:\n return flm_params\n\n\ndef corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma,\n pose_sigma):\n corrupted_flame = deepcopy(flm_params)\n if corruption_type == 'shape' or corruption_type == 'all':\n corrupted_flame[:, :10] = flm_params[:, :10] + np.clip(np.random.\n normal(0, sigma, flm_params[:, :10].shape), -3 * sigma, 3 * sigma\n ).astype('float32')\n if corruption_type == 'exp_jaw' or corruption_type == 'all':\n corrupted_flame[:, 100:110] = flm_params[:, 100:110] + np.clip(np.\n random.normal(0, sigma, flm_params[:, 100:110].shape), -3 *\n sigma, 3 * sigma).astype('float32')\n corrupted_flame[:, 153] = flm_params[:, 153] + np.random.normal(0,\n jaw_sigma, corrupted_flame.shape[0])\n if corruption_type == 'pose' or corruption_type == 'all':\n pose_perturbation = np.random.normal(0, pose_sigma, (\n corrupted_flame.shape[0],))\n corrupted_flame[:, 151] = flm_params[:, 151] + np.clip(\n pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)\n return corrupted_flame\n\n\nsave_images = True\ncode_size = 236\nuse_inst_norm = True\ncore_tensor_res = 4\nresolution = 256\nalpha = 1\nstep_max = int(np.log2(resolution) - 2)\nnum_smpl_to_eval_on = 128\nuse_styled_conv_stylegan2 = True\nflength = 5000\ncam_t = np.array([0.0, 0.0, 0])\ncamera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)\nrun_ids_1 = [29]\nsettings_for_runs = {(24): {'name': 'vector_cond', 'model_idx': '216000_1',\n 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False,\n 'apply_sqrt2_fac_in_eq_lin': False}, (29): {'name': 'full_model',\n 'model_idx': '294000_1', 'normal_maps_as_cond': True,\n 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},\n (7): {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1',\n 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True,\n 'apply_sqrt2_fac_in_eq_lin': False}, (3): {'name': 'norm_mp_tex_interp',\n 'model_idx': '203000_1', 'normal_maps_as_cond': True,\n 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': \n False}, (8): {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx':\n '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition':\n True, 'apply_sqrt2_fac_in_eq_lin': False}}\noverlay_visualizer = OverLayViz()\nflm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')\nfl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()\nnp.random.seed(2)\nfor i, key in enumerate(fl_param_dict):\n flame_param = fl_param_dict[key]\n shape_params = np.concatenate((np.random.normal(0, 1, [3]), np.zeros(97))\n ).astype('float32')\n exp_params = np.concatenate((np.random.normal(0, 1, [3]), np.zeros(47))\n ).astype('float32')\n pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0, np.\n random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')\n texture = np.random.normal(0, 1, [50]).astype('float32')\n flame_param = np.hstack((shape_params, exp_params, pose, flame_param[\n 'cam'], texture, flame_param['lit'].flatten()))\n flm_params[i, :] = flame_param.astype('float32')\n if i == num_smpl_to_eval_on - 1:\n break\nbatch_size = 32\nnum_sigmas = 1\ncorruption_sigma 
= np.linspace(0, 1.5, num_sigmas)\njaw_rot_range = 0, np.pi / 8\njaw_rot_sigmas = np.linspace(0, (jaw_rot_range[1] - jaw_rot_range[0]) / 6,\n num_sigmas)\npose_range = -np.pi / 3, np.pi / 3\npose_sigmas = np.linspace(0, (pose_range[1] - pose_range[0]) / 6, num_sigmas)\nconfig_obj = util.dict2obj(cnst.flame_config)\nflame_decoder = FLAME.FLAME(config_obj).cuda().eval()\nfor run_idx in run_ids_1:\n generator_1 = torch.nn.DataParallel(StyledGenerator(\n embedding_vocab_size=69158, rendered_flame_ascondition=\n settings_for_runs[run_idx]['rendered_flame_as_condition'],\n normal_maps_as_cond=settings_for_runs[run_idx][\n 'normal_maps_as_cond'], apply_sqrt2_fac_in_eq_lin=settings_for_runs\n [run_idx]['apply_sqrt2_fac_in_eq_lin'], core_tensor_res=\n core_tensor_res, w_truncation_factor=1.0, n_mlp=8)).cuda()\n model_idx = settings_for_runs[run_idx]['model_idx']\n ckpt1 = torch.load(\n f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')\n generator_1.load_state_dict(ckpt1['generator_running'])\n generator_1 = generator_1.eval()\n params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [],\n 'light_code': [], 'texture_code': [], 'identity_indices': []}\n for i, sigma in enumerate(corruption_sigma):\n images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)\n ).astype('float32')\n flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution,\n resolution)).astype('float32')\n pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))\n pbar.set_description('Generating_images')\n for batch_idx in pbar:\n flm_batch = flm_params[batch_idx:batch_idx + batch_size, :]\n flm_batch = torch.from_numpy(flm_batch).cuda()\n flm_batch = position_to_given_location(flame_decoder, flm_batch)\n if settings_for_runs[run_idx]['normal_maps_as_cond'\n ] or settings_for_runs[run_idx]['rendered_flame_as_condition']:\n batch_size_true = flm_batch.shape[0]\n cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.\n DECA_IDX['cam'][1]]\n shape = flm_batch[:, constants.INDICES['SHAPE'][0]:\n constants.INDICES['SHAPE'][1]]\n exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.\n INDICES['EXP'][1]]\n pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.\n INDICES['POSE'][1]]\n light_code = flm_batch[:, constants.DECA_IDX['lit'][0]:\n constants.DECA_IDX['lit'][1]].view((batch_size_true, 9, 3))\n texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:\n constants.DECA_IDX['tex'][1]]\n params_to_save['cam'].append(cam.cpu().detach().numpy())\n params_to_save['shape'].append(shape.cpu().detach().numpy())\n params_to_save['shape'].append(shape.cpu().detach().numpy())\n params_to_save['exp'].append(exp.cpu().detach().numpy())\n params_to_save['pose'].append(pose.cpu().detach().numpy())\n params_to_save['light_code'].append(light_code.cpu().detach\n ().numpy())\n params_to_save['texture_code'].append(texture_code.cpu().\n detach().numpy())\n norma_map_img, _, _, _, rend_flm = (overlay_visualizer.\n get_rendered_mesh(flame_params=(shape, exp, pose,\n light_code, texture_code), camera_params=cam))\n rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1\n norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1\n rend_flm = fast_image_reshape(rend_flm, height_out=256,\n width_out=256, mode='bilinear')\n norma_map_img = fast_image_reshape(norma_map_img,\n height_out=256, width_out=256, mode='bilinear')\n norma_map_img_to_save, _, _, _, rend_flm_to_save = (\n overlay_visualizer.get_rendered_mesh(flame_params=(\n shape, exp, pose, light_code, texture_code),\n camera_params=cam, cull_backfaces=False,\n 
constant_albedo=0.6))\n rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1\n rend_flm_to_save = fast_image_reshape(rend_flm_to_save,\n height_out=256, width_out=256, mode='bilinear')\n else:\n rend_flm = None\n norma_map_img = None\n gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img,\n settings_for_runs[run_idx]['normal_maps_as_cond'],\n settings_for_runs[run_idx]['rendered_flame_as_condition'])\n identity_embeddings = torch.randint(low=0, high=69158, size=(\n gen_1_in.shape[0],), dtype=torch.long, device='cuda')\n mdl_1_gen_images = generic_utils.get_images_from_flame_params(\n flame_params=gen_1_in.cpu().numpy(), pose=None, model=\n generator_1, step=step_max, alpha=alpha, input_indices=\n identity_embeddings.cpu().numpy())\n params_to_save['identity_indices'].append(identity_embeddings.\n cpu().detach().numpy())\n images[batch_idx:batch_idx + batch_size_true] = torch.clamp(\n mdl_1_gen_images, -1, 1).cpu().numpy()\n flame_mesh_imgs[batch_idx:batch_idx + batch_size_true\n ] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()\n if save_images:\n mdl_name = settings_for_runs[run_idx]['name']\n for key in params_to_save.keys():\n params_to_save[key] = np.concatenate(params_to_save[key],\n axis=0)\n save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx\n ), f'random_samples_q_eval_{mdl_name}')\n os.makedirs(save_dir, exist_ok=True)\n np.save(os.path.join(save_dir, 'params.npy'), params_to_save)\n save_path_current_id = os.path.join(save_dir, 'images')\n save_set_of_images(path=save_path_current_id, prefix='', images\n =(images + 1) / 2, show_prog_bar=True)\n save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions'\n )\n save_set_of_images(path=save_path_current_id_flm_rndr, prefix=\n 'mesh', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)\n",
"step-4": "import sys\nsys.path.append('../')\nimport constants as cnst\nimport os\nos.environ['PYTHONHASHSEED'] = '2'\nimport tqdm\nfrom model.stg2_generator import StyledGenerator\nimport numpy as np\nfrom my_utils.visualize_flame_overlay import OverLayViz\nfrom my_utils.flm_dynamic_fit_overlay import camera_ringnetpp\nfrom my_utils.generic_utils import save_set_of_images\nimport constants\nfrom dataset_loaders import fast_image_reshape\nimport torch\nfrom my_utils import generic_utils\nfrom my_utils.eye_centering import position_to_given_location\nfrom copy import deepcopy\nfrom my_utils.photometric_optimization.models import FLAME\nfrom my_utils.photometric_optimization import util\n\n\ndef ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond,\n texture_cond):\n if normal_map_cond and texture_cond:\n return torch.cat((textured_rndr, norm_map), dim=1)\n elif normal_map_cond:\n return norm_map\n elif texture_cond:\n return textured_rndr\n else:\n return flm_params\n\n\ndef corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma,\n pose_sigma):\n corrupted_flame = deepcopy(flm_params)\n if corruption_type == 'shape' or corruption_type == 'all':\n corrupted_flame[:, :10] = flm_params[:, :10] + np.clip(np.random.\n normal(0, sigma, flm_params[:, :10].shape), -3 * sigma, 3 * sigma\n ).astype('float32')\n if corruption_type == 'exp_jaw' or corruption_type == 'all':\n corrupted_flame[:, 100:110] = flm_params[:, 100:110] + np.clip(np.\n random.normal(0, sigma, flm_params[:, 100:110].shape), -3 *\n sigma, 3 * sigma).astype('float32')\n corrupted_flame[:, 153] = flm_params[:, 153] + np.random.normal(0,\n jaw_sigma, corrupted_flame.shape[0])\n if corruption_type == 'pose' or corruption_type == 'all':\n pose_perturbation = np.random.normal(0, pose_sigma, (\n corrupted_flame.shape[0],))\n corrupted_flame[:, 151] = flm_params[:, 151] + np.clip(\n pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)\n return corrupted_flame\n\n\nsave_images = True\ncode_size = 236\nuse_inst_norm = True\ncore_tensor_res = 4\nresolution = 256\nalpha = 1\nstep_max = int(np.log2(resolution) - 2)\nnum_smpl_to_eval_on = 128\nuse_styled_conv_stylegan2 = True\nflength = 5000\ncam_t = np.array([0.0, 0.0, 0])\ncamera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)\nrun_ids_1 = [29]\nsettings_for_runs = {(24): {'name': 'vector_cond', 'model_idx': '216000_1',\n 'normal_maps_as_cond': False, 'rendered_flame_as_condition': False,\n 'apply_sqrt2_fac_in_eq_lin': False}, (29): {'name': 'full_model',\n 'model_idx': '294000_1', 'normal_maps_as_cond': True,\n 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},\n (7): {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1',\n 'normal_maps_as_cond': False, 'rendered_flame_as_condition': True,\n 'apply_sqrt2_fac_in_eq_lin': False}, (3): {'name': 'norm_mp_tex_interp',\n 'model_idx': '203000_1', 'normal_maps_as_cond': True,\n 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': \n False}, (8): {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx':\n '009000_1', 'normal_maps_as_cond': True, 'rendered_flame_as_condition':\n True, 'apply_sqrt2_fac_in_eq_lin': False}}\noverlay_visualizer = OverLayViz()\nflm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')\nfl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()\nnp.random.seed(2)\nfor i, key in enumerate(fl_param_dict):\n flame_param = fl_param_dict[key]\n shape_params = np.concatenate((np.random.normal(0, 1, [3]), 
np.zeros(97))\n ).astype('float32')\n exp_params = np.concatenate((np.random.normal(0, 1, [3]), np.zeros(47))\n ).astype('float32')\n pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0, np.\n random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')\n texture = np.random.normal(0, 1, [50]).astype('float32')\n flame_param = np.hstack((shape_params, exp_params, pose, flame_param[\n 'cam'], texture, flame_param['lit'].flatten()))\n flm_params[i, :] = flame_param.astype('float32')\n if i == num_smpl_to_eval_on - 1:\n break\nbatch_size = 32\nnum_sigmas = 1\ncorruption_sigma = np.linspace(0, 1.5, num_sigmas)\njaw_rot_range = 0, np.pi / 8\njaw_rot_sigmas = np.linspace(0, (jaw_rot_range[1] - jaw_rot_range[0]) / 6,\n num_sigmas)\npose_range = -np.pi / 3, np.pi / 3\npose_sigmas = np.linspace(0, (pose_range[1] - pose_range[0]) / 6, num_sigmas)\nconfig_obj = util.dict2obj(cnst.flame_config)\nflame_decoder = FLAME.FLAME(config_obj).cuda().eval()\nfor run_idx in run_ids_1:\n generator_1 = torch.nn.DataParallel(StyledGenerator(\n embedding_vocab_size=69158, rendered_flame_ascondition=\n settings_for_runs[run_idx]['rendered_flame_as_condition'],\n normal_maps_as_cond=settings_for_runs[run_idx][\n 'normal_maps_as_cond'], apply_sqrt2_fac_in_eq_lin=settings_for_runs\n [run_idx]['apply_sqrt2_fac_in_eq_lin'], core_tensor_res=\n core_tensor_res, w_truncation_factor=1.0, n_mlp=8)).cuda()\n model_idx = settings_for_runs[run_idx]['model_idx']\n ckpt1 = torch.load(\n f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')\n generator_1.load_state_dict(ckpt1['generator_running'])\n generator_1 = generator_1.eval()\n params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [],\n 'light_code': [], 'texture_code': [], 'identity_indices': []}\n for i, sigma in enumerate(corruption_sigma):\n images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)\n ).astype('float32')\n flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution,\n resolution)).astype('float32')\n pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))\n pbar.set_description('Generating_images')\n for batch_idx in pbar:\n flm_batch = flm_params[batch_idx:batch_idx + batch_size, :]\n flm_batch = torch.from_numpy(flm_batch).cuda()\n flm_batch = position_to_given_location(flame_decoder, flm_batch)\n if settings_for_runs[run_idx]['normal_maps_as_cond'\n ] or settings_for_runs[run_idx]['rendered_flame_as_condition']:\n batch_size_true = flm_batch.shape[0]\n cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.\n DECA_IDX['cam'][1]]\n shape = flm_batch[:, constants.INDICES['SHAPE'][0]:\n constants.INDICES['SHAPE'][1]]\n exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.\n INDICES['EXP'][1]]\n pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.\n INDICES['POSE'][1]]\n light_code = flm_batch[:, constants.DECA_IDX['lit'][0]:\n constants.DECA_IDX['lit'][1]].view((batch_size_true, 9, 3))\n texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:\n constants.DECA_IDX['tex'][1]]\n params_to_save['cam'].append(cam.cpu().detach().numpy())\n params_to_save['shape'].append(shape.cpu().detach().numpy())\n params_to_save['shape'].append(shape.cpu().detach().numpy())\n params_to_save['exp'].append(exp.cpu().detach().numpy())\n params_to_save['pose'].append(pose.cpu().detach().numpy())\n params_to_save['light_code'].append(light_code.cpu().detach\n ().numpy())\n params_to_save['texture_code'].append(texture_code.cpu().\n detach().numpy())\n norma_map_img, _, _, _, rend_flm = (overlay_visualizer.\n 
get_rendered_mesh(flame_params=(shape, exp, pose,\n light_code, texture_code), camera_params=cam))\n rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1\n norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1\n rend_flm = fast_image_reshape(rend_flm, height_out=256,\n width_out=256, mode='bilinear')\n norma_map_img = fast_image_reshape(norma_map_img,\n height_out=256, width_out=256, mode='bilinear')\n norma_map_img_to_save, _, _, _, rend_flm_to_save = (\n overlay_visualizer.get_rendered_mesh(flame_params=(\n shape, exp, pose, light_code, texture_code),\n camera_params=cam, cull_backfaces=False,\n constant_albedo=0.6))\n rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1\n rend_flm_to_save = fast_image_reshape(rend_flm_to_save,\n height_out=256, width_out=256, mode='bilinear')\n else:\n rend_flm = None\n norma_map_img = None\n gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img,\n settings_for_runs[run_idx]['normal_maps_as_cond'],\n settings_for_runs[run_idx]['rendered_flame_as_condition'])\n identity_embeddings = torch.randint(low=0, high=69158, size=(\n gen_1_in.shape[0],), dtype=torch.long, device='cuda')\n mdl_1_gen_images = generic_utils.get_images_from_flame_params(\n flame_params=gen_1_in.cpu().numpy(), pose=None, model=\n generator_1, step=step_max, alpha=alpha, input_indices=\n identity_embeddings.cpu().numpy())\n params_to_save['identity_indices'].append(identity_embeddings.\n cpu().detach().numpy())\n images[batch_idx:batch_idx + batch_size_true] = torch.clamp(\n mdl_1_gen_images, -1, 1).cpu().numpy()\n flame_mesh_imgs[batch_idx:batch_idx + batch_size_true\n ] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()\n if save_images:\n mdl_name = settings_for_runs[run_idx]['name']\n for key in params_to_save.keys():\n params_to_save[key] = np.concatenate(params_to_save[key],\n axis=0)\n save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx\n ), f'random_samples_q_eval_{mdl_name}')\n os.makedirs(save_dir, exist_ok=True)\n np.save(os.path.join(save_dir, 'params.npy'), params_to_save)\n save_path_current_id = os.path.join(save_dir, 'images')\n save_set_of_images(path=save_path_current_id, prefix='', images\n =(images + 1) / 2, show_prog_bar=True)\n save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions'\n )\n save_set_of_images(path=save_path_current_id_flm_rndr, prefix=\n 'mesh', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)\n",
"step-5": "import sys\nsys.path.append('../')\nimport constants as cnst\nimport os\nos.environ['PYTHONHASHSEED'] = '2'\nimport tqdm\nfrom model.stg2_generator import StyledGenerator\nimport numpy as np\nfrom my_utils.visualize_flame_overlay import OverLayViz\nfrom my_utils.flm_dynamic_fit_overlay import camera_ringnetpp\nfrom my_utils.generic_utils import save_set_of_images\nimport constants\nfrom dataset_loaders import fast_image_reshape\nimport torch\nfrom my_utils import generic_utils\nfrom my_utils.eye_centering import position_to_given_location\nfrom copy import deepcopy\nfrom my_utils.photometric_optimization.models import FLAME\nfrom my_utils.photometric_optimization import util\n\n\ndef ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):\n if normal_map_cond and texture_cond:\n return torch.cat((textured_rndr, norm_map), dim=1)\n elif normal_map_cond:\n return norm_map\n elif texture_cond:\n return textured_rndr\n else:\n return flm_params\n\n\ndef corrupt_flame_given_sigma(flm_params, corruption_type, sigma, jaw_sigma, pose_sigma):\n # import ipdb; ipdb.set_trace()\n # np.random.seed(2)\n corrupted_flame = deepcopy(flm_params)\n if corruption_type == 'shape' or corruption_type == 'all':\n corrupted_flame[:, :10] = flm_params[:, :10] + \\\n np.clip(np.random.normal(0, sigma, flm_params[:, :10].shape),\n -3 * sigma, 3 * sigma).astype('float32')\n if corruption_type == 'exp_jaw'or corruption_type == 'all':\n # Expression\n corrupted_flame[:, 100:110] = flm_params[:, 100:110] + \\\n np.clip(np.random.normal(0, sigma, flm_params[:, 100:110].shape),\n -3 * sigma, 3 * sigma).astype('float32')\n # Jaw pose\n corrupted_flame[:, 153] = flm_params[:, 153] + \\\n np.random.normal(0, jaw_sigma, corrupted_flame.shape[0])\n\n if corruption_type == 'pose' or corruption_type == 'all':\n # pose_perturbation = np.random.normal(0, pose_sigma[i], (corrupted_flame.shape[0], 3))\n # corrupted_flame[:, 150:153] += np.clip(pose_perturbation, -3 * pose_sigma[i], 3 * pose_sigma[i])\n pose_perturbation = np.random.normal(0, pose_sigma, (corrupted_flame.shape[0],))\n corrupted_flame[:, 151] = flm_params[:, 151] + \\\n np.clip(pose_perturbation, -3 * pose_sigma, 3 * pose_sigma)\n\n return corrupted_flame\n\n\n# General settings\nsave_images = True\ncode_size = 236\nuse_inst_norm = True\ncore_tensor_res = 4\nresolution = 256\nalpha = 1\nstep_max = int(np.log2(resolution) - 2)\nnum_smpl_to_eval_on = 128\nuse_styled_conv_stylegan2 = True\n\nflength = 5000\ncam_t = np.array([0., 0., 0])\ncamera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)\n\n# Uncomment the appropriate run_id\nrun_ids_1 = [29, ] # with sqrt(2)\n# run_ids_1 = [7, 24, 8, 3]\n# run_ids_1 = [7, 8, 3]\n# run_ids_1 = [7]\n\nsettings_for_runs = \\\n {24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,\n 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},\n 29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,\n 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},\n 7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,\n 'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},\n 3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,\n 'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},\n 8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,\n 
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}\n\n\noverlay_visualizer = OverLayViz()\n# overlay_visualizer.setup_renderer(mesh_file=None)\n\nflm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')\nfl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()\nnp.random.seed(2)\nfor i, key in enumerate(fl_param_dict):\n flame_param = fl_param_dict[key]\n shape_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(97))).astype('float32')\n exp_params = np.concatenate((np.random.normal(0, 1, [3,]), np.zeros(47))).astype('float32')\n # +- pi/4 for bad samples +- pi/8 for good samples\n # pose = np.array([0, np.random.uniform(-np.pi/4, np.pi/4, 1), 0,\n # np.random.uniform(0, np.pi/12, 1), 0, 0]).astype('float32')\n pose = np.array([0, np.random.uniform(-np.pi / 8, np.pi / 8, 1), 0,\n np.random.uniform(0, np.pi / 12, 1), 0, 0]).astype('float32')\n texture = np.random.normal(0, 1, [50]).astype('float32')\n # texture = flame_param['tex']\n flame_param = np.hstack((shape_params, exp_params, pose, flame_param['cam'],\n texture, flame_param['lit'].flatten()))\n # tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])\n # flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)\n\n # import ipdb; ipdb.set_trace()\n flm_params[i, :] = flame_param.astype('float32')\n if i == num_smpl_to_eval_on - 1:\n break\n\nbatch_size = 32\n\nnum_sigmas = 1\ncorruption_sigma = np.linspace(0, 1.5, num_sigmas)\njaw_rot_range = (0, np.pi/8)\njaw_rot_sigmas = np.linspace(0, (jaw_rot_range[1] - jaw_rot_range[0])/6, num_sigmas)\npose_range = (-np.pi/3, np.pi/3)\npose_sigmas = np.linspace(0, (pose_range[1] - pose_range[0])/6, num_sigmas)\nconfig_obj = util.dict2obj(cnst.flame_config)\nflame_decoder = FLAME.FLAME(config_obj).cuda().eval()\n\nfor run_idx in run_ids_1:\n # import ipdb; ipdb.set_trace()\n generator_1 = torch.nn.DataParallel(\n StyledGenerator(embedding_vocab_size=69158,\n rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],\n normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],\n apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],\n core_tensor_res=core_tensor_res,\n w_truncation_factor=1.0,\n n_mlp=8)).cuda()\n model_idx = settings_for_runs[run_idx]['model_idx']\n ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')\n generator_1.load_state_dict(ckpt1['generator_running'])\n generator_1 = generator_1.eval()\n\n params_to_save = {'cam': [], 'shape': [], 'exp': [], 'pose': [], 'light_code': [], 'texture_code': [],\n 'identity_indices': []}\n\n for i, sigma in enumerate(corruption_sigma):\n images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')\n flame_mesh_imgs = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')\n pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))\n pbar.set_description('Generating_images')\n # print(flm_params[1, :])\n for batch_idx in pbar:\n flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]\n flm_batch = torch.from_numpy(flm_batch).cuda()\n # flm_batch = eye_cntr_reg.substitute_flame_batch_with_regressed_camera(flm_batch)\n flm_batch = position_to_given_location(flame_decoder, flm_batch)\n\n if settings_for_runs[run_idx]['normal_maps_as_cond'] or \\\n settings_for_runs[run_idx]['rendered_flame_as_condition']:\n\n batch_size_true = flm_batch.shape[0]\n cam = flm_batch[:, 
constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]\n shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]\n exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]\n pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]\n # import ipdb; ipdb.set_trace()\n light_code = \\\n flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))\n texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]\n\n params_to_save['cam'].append(cam.cpu().detach().numpy())\n params_to_save['shape'].append(shape.cpu().detach().numpy())\n params_to_save['shape'].append(shape.cpu().detach().numpy())\n params_to_save['exp'].append(exp.cpu().detach().numpy())\n params_to_save['pose'].append(pose.cpu().detach().numpy())\n params_to_save['light_code'].append(light_code.cpu().detach().numpy())\n params_to_save['texture_code'].append(texture_code.cpu().detach().numpy())\n\n norma_map_img, _, _, _, rend_flm = \\\n overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),\n camera_params=cam)\n # import ipdb; ipdb.set_trace()\n\n rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1\n norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1\n rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')\n norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')\n\n # Render the 2nd time to get backface culling and white texture\n # norma_map_img_to_save, _, _, _, rend_flm_to_save = \\\n # overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),\n # camera_params=cam, cull_backfaces=True, constant_albedo=0.6)\n # Back face culling temporarily un-availabe\n\n norma_map_img_to_save, _, _, _, rend_flm_to_save = \\\n overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),\n camera_params=cam, cull_backfaces=False, constant_albedo=0.6)\n rend_flm_to_save = torch.clamp(rend_flm_to_save, 0, 1) * 2 - 1\n # rend_flm_to_save = rend_flm\n # norma_map_img_to_save = torch.clamp(norma_map_img, 0, 1) * 2 - 1\n rend_flm_to_save = fast_image_reshape(rend_flm_to_save, height_out=256, width_out=256, mode='bilinear')\n # norma_map_img_to_save = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')\n\n else:\n rend_flm = None\n norma_map_img = None\n\n gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],\n settings_for_runs[run_idx]['rendered_flame_as_condition'])\n\n # torch.manual_seed(2)\n identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,\n device='cuda')\n mdl_1_gen_images = generic_utils.get_images_from_flame_params(\n flame_params=gen_1_in.cpu().numpy(), pose=None,\n model=generator_1,\n step=step_max, alpha=alpha,\n input_indices=identity_embeddings.cpu().numpy())\n\n params_to_save['identity_indices'].append(identity_embeddings.cpu().detach().numpy())\n # import ipdb; ipdb.set_trace()\n images[batch_idx:batch_idx+batch_size_true] = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()\n # if flame_mesh_imgs is None:\n flame_mesh_imgs[batch_idx:batch_idx+batch_size_true] = torch.clamp(rend_flm_to_save, -1, 1).cpu().numpy()\n\n if save_images:\n mdl_name = settings_for_runs[run_idx]['name']\n for key in params_to_save.keys():\n params_to_save[key] = 
np.concatenate(params_to_save[key], axis=0)\n\n save_dir = os.path.join(cnst.output_root, 'sample', str(run_idx), f'random_samples_q_eval_{mdl_name}')\n os.makedirs(save_dir, exist_ok=True)\n np.save(os.path.join(save_dir, 'params.npy'), params_to_save)\n\n save_path_current_id = os.path.join(save_dir, 'images')\n save_set_of_images(path=save_path_current_id, prefix='', images=(images + 1) / 2, show_prog_bar=True)\n\n #save flam rndr\n save_path_current_id_flm_rndr = os.path.join(save_dir, 'conditions')\n save_set_of_images(path=save_path_current_id_flm_rndr, prefix='mesh', images=(flame_mesh_imgs + 1) / 2,\n show_prog_bar=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
np.random.seed(1)
class MonteCarloGameDriver():
def __init__(self):
self.default_moves = np.array(['w','a','s','d'])
self.probability_distribution = np.array([.25,.25,.25,.25])
def run_game(self, simulation_size=20):
from game import GameLayout
from copy import deepcopy
game = GameLayout()
while game.active:
# simulate simulation_size games starting at this point
game_performance = self.simulate(game, simulation_size)
if len(game_performance)==0:
game.end_game()
print("After {} simulations, achieved max tile {} and score {}".format(simulation_size, game.final_layout.max(), game.score))
break
# return the first move with highest average score
recommendation = max(game_performance, key=game_performance.get)
game.swipe(recommendation)
# game is over
self.log_game(game)
def simulate(self, game, simulation_size):
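        # Monte Carlo step: play simulation_size random games from this position and average the final score of each first move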
from collections import defaultdict
game_performance = defaultdict(list)
from copy import deepcopy
for i in range(simulation_size):
# run copy game multiple times, saving final scores and first moves each time
game_copy = deepcopy(game)
game_copy.reset()
while game_copy.active:
move_order = self.weighted_shuffle(self.default_moves, self.probability_distribution)
for move in move_order:
try:
game_copy.swipe(move)
break
except:
# move didn't work, try next move
continue
# log final score and first move
try:
game_performance[self.default_moves[(game_copy.moves[0]==1).argmax()]].append(game_copy.score)
except AttributeError:
pass
# get average score for each first move
game_performance = {key: np.mean(val) for key, val in game_performance.items()}
return game_performance
def weighted_shuffle(self, options,weights):
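        # Weighted shuffle: repeatedly draw one element according to the weights, remove it, then renormalize the remaining weights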
lst = list(options)
        w = [None]*len(lst) # output list to be filled in weighted order
for i in range(len(lst)):
win_idx = np.random.choice(range(len(lst)), p=weights)
w[i] = lst[win_idx]
del lst[win_idx]
weights = np.delete(weights, win_idx)
weights = weights/weights.sum()
return w
def log_game(self, game):
assert not game.active # must be a finished game
try:
self.final_scores = np.append(self.final_scores, game.score)
self.num_moves = np.append(self.num_moves, game.num_moves)
self.layouts.append(game.layouts)
self.final_layouts.append(game.final_layout)
self.moves.append(game.moves)
self.scores.append(game.scores)
self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())
self.max_tile = np.append(self.max_tile, game.final_layout.max())
self.wins = np.append(self.wins, game.won)
except AttributeError:
self.final_scores = np.array(game.score)
self.num_moves = np.array(game.num_moves)
self.layouts = [game.layouts]
self.final_layouts = [game.final_layout]
self.moves = [game.moves]
self.scores = [game.scores]
self.tile_sums = np.array(game.final_layout.sum())
self.max_tile = np.array(game.final_layout.max())
self.wins = np.array(game.won)
|
normal
|
{
"blob_id": "aeb986360c6990f9375f2552cbdeef595af815b4",
"index": 6432,
"step-1": "<mask token>\n\n\nclass MonteCarloGameDriver:\n\n def __init__(self):\n self.default_moves = np.array(['w', 'a', 's', 'd'])\n self.probability_distribution = np.array([0.25, 0.25, 0.25, 0.25])\n <mask token>\n\n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n from copy import deepcopy\n for i in range(simulation_size):\n game_copy = deepcopy(game)\n game_copy.reset()\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self\n .probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n continue\n try:\n game_performance[self.default_moves[(game_copy.moves[0] == \n 1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n game_performance = {key: np.mean(val) for key, val in\n game_performance.items()}\n return game_performance\n\n def weighted_shuffle(self, options, weights):\n lst = list(options)\n w = [None] * len(lst)\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights / weights.sum()\n return w\n\n def log_game(self, game):\n assert not game.active\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)\n",
"step-2": "<mask token>\n\n\nclass MonteCarloGameDriver:\n\n def __init__(self):\n self.default_moves = np.array(['w', 'a', 's', 'd'])\n self.probability_distribution = np.array([0.25, 0.25, 0.25, 0.25])\n\n def run_game(self, simulation_size=20):\n from game import GameLayout\n from copy import deepcopy\n game = GameLayout()\n while game.active:\n game_performance = self.simulate(game, simulation_size)\n if len(game_performance) == 0:\n game.end_game()\n print('After {} simulations, achieved max tile {} and score {}'\n .format(simulation_size, game.final_layout.max(), game.\n score))\n break\n recommendation = max(game_performance, key=game_performance.get)\n game.swipe(recommendation)\n self.log_game(game)\n\n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n from copy import deepcopy\n for i in range(simulation_size):\n game_copy = deepcopy(game)\n game_copy.reset()\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self\n .probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n continue\n try:\n game_performance[self.default_moves[(game_copy.moves[0] == \n 1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n game_performance = {key: np.mean(val) for key, val in\n game_performance.items()}\n return game_performance\n\n def weighted_shuffle(self, options, weights):\n lst = list(options)\n w = [None] * len(lst)\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights / weights.sum()\n return w\n\n def log_game(self, game):\n assert not game.active\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)\n",
"step-3": "<mask token>\nnp.random.seed(1)\n\n\nclass MonteCarloGameDriver:\n\n def __init__(self):\n self.default_moves = np.array(['w', 'a', 's', 'd'])\n self.probability_distribution = np.array([0.25, 0.25, 0.25, 0.25])\n\n def run_game(self, simulation_size=20):\n from game import GameLayout\n from copy import deepcopy\n game = GameLayout()\n while game.active:\n game_performance = self.simulate(game, simulation_size)\n if len(game_performance) == 0:\n game.end_game()\n print('After {} simulations, achieved max tile {} and score {}'\n .format(simulation_size, game.final_layout.max(), game.\n score))\n break\n recommendation = max(game_performance, key=game_performance.get)\n game.swipe(recommendation)\n self.log_game(game)\n\n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n from copy import deepcopy\n for i in range(simulation_size):\n game_copy = deepcopy(game)\n game_copy.reset()\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self\n .probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n continue\n try:\n game_performance[self.default_moves[(game_copy.moves[0] == \n 1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n game_performance = {key: np.mean(val) for key, val in\n game_performance.items()}\n return game_performance\n\n def weighted_shuffle(self, options, weights):\n lst = list(options)\n w = [None] * len(lst)\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights / weights.sum()\n return w\n\n def log_game(self, game):\n assert not game.active\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)\n",
"step-4": "import numpy as np\nnp.random.seed(1)\n\n\nclass MonteCarloGameDriver:\n\n def __init__(self):\n self.default_moves = np.array(['w', 'a', 's', 'd'])\n self.probability_distribution = np.array([0.25, 0.25, 0.25, 0.25])\n\n def run_game(self, simulation_size=20):\n from game import GameLayout\n from copy import deepcopy\n game = GameLayout()\n while game.active:\n game_performance = self.simulate(game, simulation_size)\n if len(game_performance) == 0:\n game.end_game()\n print('After {} simulations, achieved max tile {} and score {}'\n .format(simulation_size, game.final_layout.max(), game.\n score))\n break\n recommendation = max(game_performance, key=game_performance.get)\n game.swipe(recommendation)\n self.log_game(game)\n\n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n from copy import deepcopy\n for i in range(simulation_size):\n game_copy = deepcopy(game)\n game_copy.reset()\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self\n .probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n continue\n try:\n game_performance[self.default_moves[(game_copy.moves[0] == \n 1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n game_performance = {key: np.mean(val) for key, val in\n game_performance.items()}\n return game_performance\n\n def weighted_shuffle(self, options, weights):\n lst = list(options)\n w = [None] * len(lst)\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights / weights.sum()\n return w\n\n def log_game(self, game):\n assert not game.active\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)\n",
"step-5": "import numpy as np\nnp.random.seed(1)\n\nclass MonteCarloGameDriver(): \n def __init__(self):\n self.default_moves = np.array(['w','a','s','d'])\n self.probability_distribution = np.array([.25,.25,.25,.25])\n\n def run_game(self, simulation_size=20):\n from game import GameLayout\n from copy import deepcopy\n \n game = GameLayout()\n \n while game.active:\n # simulate simulation_size games starting at this point\n game_performance = self.simulate(game, simulation_size)\n \n if len(game_performance)==0:\n game.end_game()\n \n print(\"After {} simulations, achieved max tile {} and score {}\".format(simulation_size, game.final_layout.max(), game.score))\n break\n\n # return the first move with highest average score\n recommendation = max(game_performance, key=game_performance.get)\n \n game.swipe(recommendation)\n \n # game is over\n self.log_game(game)\n\n \n def simulate(self, game, simulation_size):\n from collections import defaultdict\n game_performance = defaultdict(list)\n \n from copy import deepcopy\n\n for i in range(simulation_size):\n # run copy game multiple times, saving final scores and first moves each time\n game_copy = deepcopy(game)\n game_copy.reset()\n\n while game_copy.active:\n move_order = self.weighted_shuffle(self.default_moves, self.probability_distribution)\n for move in move_order:\n try:\n game_copy.swipe(move)\n break\n except:\n # move didn't work, try next move\n continue\n # log final score and first move\n try:\n game_performance[self.default_moves[(game_copy.moves[0]==1).argmax()]].append(game_copy.score)\n except AttributeError:\n pass\n \n # get average score for each first move\n game_performance = {key: np.mean(val) for key, val in game_performance.items()}\n \n return game_performance\n\n \n def weighted_shuffle(self, options,weights):\n lst = list(options)\n w = [None]*len(lst) # make a copy\n for i in range(len(lst)):\n win_idx = np.random.choice(range(len(lst)), p=weights)\n w[i] = lst[win_idx]\n del lst[win_idx]\n weights = np.delete(weights, win_idx)\n weights = weights/weights.sum()\n return w\n\n\n def log_game(self, game):\n assert not game.active # must be a finished game\n try:\n self.final_scores = np.append(self.final_scores, game.score)\n self.num_moves = np.append(self.num_moves, game.num_moves)\n self.layouts.append(game.layouts)\n self.final_layouts.append(game.final_layout)\n self.moves.append(game.moves)\n self.scores.append(game.scores)\n self.tile_sums = np.append(self.tile_sums, game.final_layout.sum())\n self.max_tile = np.append(self.max_tile, game.final_layout.max())\n self.wins = np.append(self.wins, game.won)\n\n except AttributeError:\n self.final_scores = np.array(game.score)\n self.num_moves = np.array(game.num_moves)\n self.layouts = [game.layouts]\n self.final_layouts = [game.final_layout]\n self.moves = [game.moves]\n self.scores = [game.scores]\n self.tile_sums = np.array(game.final_layout.sum())\n self.max_tile = np.array(game.final_layout.max())\n self.wins = np.array(game.won)",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
''' Model package should contain all data types for the database engine,
which means that projects like PyCIM can be included within '''
|
normal
|
{
"blob_id": "ce3c1a7210632d0a8475fe886d514eb91d3c75ac",
"index": 7700,
"step-1": "<mask token>\n",
"step-2": "''' Model package should containt all data types for the database engine, \nwhich means that projects like PyCIM can be included within '''",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
$ pip install "<package_name> >= 1.1"
|
normal
|
{
"blob_id": "8010c0d53af6d428f29ff3ce63bcd6b5b811b051",
"index": 3456,
"step-1": "$ pip install \"<package_name> >= 1.1\"\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
/usr/share/pyshared/screenlets/plugins/SizeConverter.py
|
normal
|
{
"blob_id": "58ddf496245741498177a67b7ce692b97bbd476a",
"index": 9887,
"step-1": "/usr/share/pyshared/screenlets/plugins/SizeConverter.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from DHT_Python import dht22
from oled96 import oled
from PiBlynk import Blynk
# read data using pin 4
instance = dht22.DHT22(pin=4)
token = "---token---"
blynk = Blynk(token)
def cnct_cb():
print ("Connected: ")
blynk.on_connect(cnct_cb)
def _funCb(ACT):
result = instance.read()
if result.is_valid():
strTemp=("%.2f" % result.temperature)
strHumi=("%.2f" % result.humidity)
# Show temperature and humidity on OLED
oled.yell2("Temp="+strTemp,"Humi="+strHumi)
blynk.virtual_write(1,strTemp) # User Virtual port V1
blynk.virtual_write(2,strHumi) # User Virtual port V2
blynk.Ticker(_funCb, 140, False) # ~2 Hz
blynk.gpio_auto("button")
blynk.run()
|
normal
|
{
"blob_id": "e95ebb2aa6526e3bf3789da17d144e71cdb49aca",
"index": 2712,
"step-1": "<mask token>\n\n\ndef cnct_cb():\n print('Connected: ')\n\n\n<mask token>\n\n\ndef _funCb(ACT):\n result = instance.read()\n if result.is_valid():\n strTemp = '%.2f' % result.temperature\n strHumi = '%.2f' % result.humidity\n oled.yell2('Temp=' + strTemp, 'Humi=' + strHumi)\n blynk.virtual_write(1, strTemp)\n blynk.virtual_write(2, strHumi)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef cnct_cb():\n print('Connected: ')\n\n\nblynk.on_connect(cnct_cb)\n\n\ndef _funCb(ACT):\n result = instance.read()\n if result.is_valid():\n strTemp = '%.2f' % result.temperature\n strHumi = '%.2f' % result.humidity\n oled.yell2('Temp=' + strTemp, 'Humi=' + strHumi)\n blynk.virtual_write(1, strTemp)\n blynk.virtual_write(2, strHumi)\n\n\nblynk.Ticker(_funCb, 140, False)\nblynk.gpio_auto('button')\nblynk.run()\n",
"step-3": "<mask token>\ninstance = dht22.DHT22(pin=4)\ntoken = '---token---'\nblynk = Blynk(token)\n\n\ndef cnct_cb():\n print('Connected: ')\n\n\nblynk.on_connect(cnct_cb)\n\n\ndef _funCb(ACT):\n result = instance.read()\n if result.is_valid():\n strTemp = '%.2f' % result.temperature\n strHumi = '%.2f' % result.humidity\n oled.yell2('Temp=' + strTemp, 'Humi=' + strHumi)\n blynk.virtual_write(1, strTemp)\n blynk.virtual_write(2, strHumi)\n\n\nblynk.Ticker(_funCb, 140, False)\nblynk.gpio_auto('button')\nblynk.run()\n",
"step-4": "from DHT_Python import dht22\nfrom oled96 import oled\nfrom PiBlynk import Blynk\ninstance = dht22.DHT22(pin=4)\ntoken = '---token---'\nblynk = Blynk(token)\n\n\ndef cnct_cb():\n print('Connected: ')\n\n\nblynk.on_connect(cnct_cb)\n\n\ndef _funCb(ACT):\n result = instance.read()\n if result.is_valid():\n strTemp = '%.2f' % result.temperature\n strHumi = '%.2f' % result.humidity\n oled.yell2('Temp=' + strTemp, 'Humi=' + strHumi)\n blynk.virtual_write(1, strTemp)\n blynk.virtual_write(2, strHumi)\n\n\nblynk.Ticker(_funCb, 140, False)\nblynk.gpio_auto('button')\nblynk.run()\n",
"step-5": "from DHT_Python import dht22\nfrom oled96 import oled \nfrom PiBlynk import Blynk\n\n# read data using pin 4\ninstance = dht22.DHT22(pin=4)\n\ntoken = \"---token---\"\nblynk = Blynk(token)\ndef cnct_cb():\n\tprint (\"Connected: \")\n\t\nblynk.on_connect(cnct_cb)\n\ndef _funCb(ACT):\n\tresult = instance.read()\n\tif result.is_valid():\n\t\tstrTemp=(\"%.2f\" % result.temperature)\n\t\tstrHumi=(\"%.2f\" % result.humidity)\n\t\t# Show temperature and humidity on OLED\n\t\toled.yell2(\"Temp=\"+strTemp,\"Humi=\"+strHumi) \n\t\tblynk.virtual_write(1,strTemp) # User Virtual port V1\n\t\tblynk.virtual_write(2,strHumi) # User Virtual port V2\nblynk.Ticker(_funCb, 140, False) # ~2 Hz\n\nblynk.gpio_auto(\"button\")\n\nblynk.run()\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# _*_ coding: utf-8 _*_
# Print a binary tree level by level
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class PrintTree(object):
def printTree(self, root):
if not root:
return
'''
        next_last is the last node of the next level, cur_last is the last node of the current level;
        temp holds the values of the current row, result holds the final output
'''
next_last = cur_last = root
_queue = [root]
result, temp = [], []
while _queue:
            # While traversing level by level, keep assigning the rightmost child of the next level to next_last
_cur = _queue.pop(0)
temp.append(_cur.val)
if _cur.left:
_queue.append(_cur.left)
next_last = _cur.left
if _cur.right:
_queue.append(_cur.right)
next_last = _cur.right
            # When the current node is the last node of this level,
            # set the last node of the next level (cur_last = next_last) before _queue.pop(0) moves on to the next level
if _cur == cur_last:
result.append(temp)
temp = []
cur_last = next_last
return result
|
normal
|
{
"blob_id": "4ddff57790ad191fc29fc092bcc714f0b6273100",
"index": 7755,
"step-1": "<mask token>\n\n\nclass PrintTree(object):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-3": "class TreeNode(object):\n <mask token>\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-4": "class TreeNode(object):\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass PrintTree(object):\n\n def printTree(self, root):\n if not root:\n return\n \"\"\"\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n \"\"\"\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-5": "# _*_ coding: utf-8 _*_\n\n# 按层打印二叉树\n\n\nclass TreeNode(object):\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass PrintTree(object):\n def printTree(self, root):\n if not root:\n return\n '''\n 定义next_last为下一层的最后一个,cur_last为当前层最后一个\n temp用于存放当前行的值,resutl存放最终的结果\n '''\n next_last = cur_last = root\n _queue = [root]\n result, temp = [], []\n while _queue:\n # 在按层遍历的基础上,不断把下层最右边儿子赋值给next_last\n _cur = _queue.pop(0)\n temp.append(_cur.val)\n if _cur.left:\n _queue.append(_cur.left)\n next_last = _cur.left\n if _cur.right:\n _queue.append(_cur.right)\n next_last = _cur.right\n # 如果当前节点为此层最后的节点时,\n # 进行下层最后一个节点的赋值(cur_last=next_last),然后才由_queue.pop(0)进入下层\n if _cur == cur_last:\n result.append(temp)\n temp = []\n cur_last = next_last\n return result\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-31 18:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('product', '0007_auto_20170731_1812'),
]
operations = [
migrations.AddField(
model_name='category',
name='is_root',
field=models.BooleanField(default=False, verbose_name='是否是一级分类'),
),
migrations.AlterField(
model_name='category',
name='parent',
field=models.ForeignKey(blank=True, default=0, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='product.Category', verbose_name='上级分类'),
),
]
|
normal
|
{
"blob_id": "ae82ecadb61fd87afbc83926b9dc9d5f7e8c35a0",
"index": 4194,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('product', '0007_auto_20170731_1812')]\n operations = [migrations.AddField(model_name='category', name='is_root',\n field=models.BooleanField(default=False, verbose_name='是否是一级分类')),\n migrations.AlterField(model_name='category', name='parent', field=\n models.ForeignKey(blank=True, default=0, null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='children', to=\n 'product.Category', verbose_name='上级分类'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('product', '0007_auto_20170731_1812')]\n operations = [migrations.AddField(model_name='category', name='is_root',\n field=models.BooleanField(default=False, verbose_name='是否是一级分类')),\n migrations.AlterField(model_name='category', name='parent', field=\n models.ForeignKey(blank=True, default=0, null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='children', to=\n 'product.Category', verbose_name='上级分类'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-07-31 18:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('product', '0007_auto_20170731_1812'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='category',\n name='is_root',\n field=models.BooleanField(default=False, verbose_name='是否是一级分类'),\n ),\n migrations.AlterField(\n model_name='category',\n name='parent',\n field=models.ForeignKey(blank=True, default=0, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='product.Category', verbose_name='上级分类'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
@file : 001-rnn+lstm+crf.py
@author: xiaolu
@time : 2019-09-06
"""
import re
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
'''
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout keep probability
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level LSTM
        :param hidden_size_word: hidden size of the word-level LSTM
        :param num_layers: number of BiLSTM layers
'''
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout
)
# 1. define input
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
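        # actual sequence lengths: number of non-padding word ids (PAD id is 0) in each row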
self.lengths = tf.count_nonzero(self.word_ids, 1)
# 2. embedding
self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])
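        # character-level BiLSTM over every word (words are flattened into the batch dimension)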
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_char),
cell_bw=cells(hidden_size_char),
inputs=char_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_char_%d' % n
)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])
        word_embedded = tf.concat([word_embedded, output], axis=-1)  # concatenate the word embeddings with the char-level BiLSTM outputs
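        # word-level BiLSTM over the combined word + character features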
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_word),
cell_bw=cells(hidden_size_word),
inputs=word_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_word_%d' % n
)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))  # project BiLSTM outputs to per-tag scores
y_t = self.labels
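        # linear-chain CRF: maximize the log-likelihood of the gold tag sequences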
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths
)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
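        # mask out padding positions for the token-level accuracy computation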
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
logits, transition_params, self.lengths
)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
'''
    Load the file and parse it
    :param file: file name
    :return: word list <-> tag list
'''
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
'''
:param string:
:return:
'''
string= re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
'''
    Build the tag, word and character vocabularies, and convert the text into the corresponding id sequences
    :param texts: a list of words
    :param labels: a list of tags
    :return: sequence of word ids, sequence of tag ids
'''
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
        text = text.lower() # lowercase the current word
        tag = labels[no] # the corresponding tag
        for c in text: # build the character vocabulary
            if c not in char2idx:
                char2idx[c] = char_idx
                char_idx += 1
        if tag not in tag2idx: # build the tag vocabulary
            tag2idx[tag] = tag_idx
            tag_idx += 1
        Y.append(tag2idx[tag]) # tag of the current word as an id
        if text not in word2idx: # build the word vocabulary
            word2idx[text] = word_idx
            word_idx += 1
        X.append(word2idx[text]) # the word converted to its id
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
'''
:param args: 词转为的id的序列 词性转为id的序列
:return:
'''
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
'''
    The input arrives in chunks of 50 words each (however many chunks there are);
    convert the words of every chunk into character sequences
    :param batch:
    :return:
    '''
    x = [[len(idx2word[i]) for i in k] for k in batch] # length of every word
    maxlen = max([j for i in x for j in i]) # maximum word length
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
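    # write each word's character ids from the right end (in reverse order), leaving zero padding on the left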
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1-no] = char2idx[c]
    return temp # [num_chunks, num_words, maxlen] (each word as character ids)
def pred2label(pred):
    # Convert predicted tag ids back to tag labels
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
# print(left_train[:10])
# print(right_train[:10])
    word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # word vocabulary
    tag2idx = {'PAD': 0} # tag vocabulary
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
# print(train_X[:20])
# print(train_Y[:20])
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
    X_seq, Y_seq = to_train_seq(train_X, train_Y) # split into chunks of length 50
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape) # (203571, 50)
print(X_char_seq.shape) # (203571, 50, 61)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape) # (51312, 50)
print(X_char_seq_test.shape) # (51312, 50, 27)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
predicted = pred2label(
sess.run(model.tags_seq,
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
},
)
)
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))
|
normal
|
{
"blob_id": "5d9c8e235385ff53c7510994826ff3a04e4a5888",
"index": 10,
"step-1": "<mask token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n<mask token>\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\n<mask token>\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in 
range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef 
to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\n<mask token>\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\n<mask token>\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef 
to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表 词表 字符表 并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列 词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef 
to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表 词表 字符表 并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列 词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\nif __name__ == '__main__':\n left_train, right_train = parse('./data/eng.train')\n left_test, right_test = parse('./data/eng.testa')\n word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2}\n tag2idx = {'PAD': 0}\n char2idx = {'PAD': 0}\n word_idx = 3\n tag_idx = 1\n char_idx = 1\n train_X, train_Y = parse_XY(left_train, right_train)\n test_X, test_Y = parse_XY(left_test, right_test)\n idx2word = {idx: tag for tag, idx in word2idx.items()}\n idx2tag = {i: w for w, i in tag2idx.items()}\n seq_len = 50\n X_seq, Y_seq = to_train_seq(train_X, train_Y)\n X_char_seq = generate_char_seq(X_seq)\n print(X_seq.shape)\n print(X_char_seq.shape)\n X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)\n X_char_seq_test = generate_char_seq(X_seq_test)\n print(X_seq_test.shape)\n print(X_char_seq_test.shape)\n train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq\n test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test\n tf.reset_default_graph()\n sess = tf.Session()\n dim_word = 64\n dim_char = 128\n dropout = 0.8\n learning_rate = 0.001\n hidden_size_char = 128\n hidden_size_word = 128\n num_layers = 2\n batch_size = 32\n model = Model(dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers)\n sess.run(tf.global_variables_initializer())\n for e in range(3):\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n for i in range(0, len(train_X), batch_size):\n batch_x = train_X[i:min(i + batch_size, train_X.shape[0])]\n batch_char = train_char[i:min(i + batch_size, train_X.shape[0])]\n batch_y = train_Y[i:min(i + batch_size, train_X.shape[0])]\n acc, cost, _ = sess.run([model.accuracy, model.cost, model.\n optimizer], feed_dict={model.word_ids: batch_x, model.\n char_ids: batch_char, model.labels: batch_y})\n train_loss += cost\n train_acc += acc\n print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.\n format(e, i // batch_size + 1, cost, acc))\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i:min(i + batch_size, 
test_X.shape[0])]\n batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]\n acc, cost = sess.run([model.accuracy, model.cost], feed_dict={\n model.word_ids: batch_x, model.char_ids: batch_char, model.\n labels: batch_y})\n test_loss += cost\n test_acc += acc\n print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.\n format(e, i // batch_size + 1, cost, acc))\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n print(\n 'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (e, train_loss, train_acc, test_loss, test_acc))\n real_Y, predict_Y = [], []\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]\n predicted = pred2label(sess.run(model.tags_seq, feed_dict={model.\n word_ids: batch_x, model.char_ids: batch_char}))\n real = pred2label(batch_y)\n predict_Y.extend(predicted)\n real_Y.extend(real)\n print(classification_report(np.array(real_Y).ravel(), np.array(\n predict_Y).ravel()))\n",
"step-5": "\"\"\"\n\n@file : 001-rnn+lstm+crf.py\n\n@author: xiaolu\n\n@time : 2019-09-06\n\n\"\"\"\nimport re\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import classification_report\n\n\nclass Model:\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n '''\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n '''\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(\n tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout\n )\n\n # 1. define input\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n\n # 2. embedding\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)\n\n s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])\n\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cells(hidden_size_char),\n cell_bw=cells(hidden_size_char),\n inputs=char_embedded,\n dtype=tf.float32,\n scope='bidirectional_rnn_char_%d' % n\n )\n char_embedded = tf.concat((out_fw, out_bw), 2)\n\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接\n\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cells(hidden_size_word),\n cell_bw=cells(hidden_size_word),\n inputs=word_embedded,\n dtype=tf.float32,\n scope='bidirectional_rnn_word_%d' % n\n )\n word_embedded = tf.concat((out_fw, out_bw), 2)\n\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths\n )\n\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(\n logits, transition_params, self.lengths\n )\n\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n\n y_t = tf.cast(y_t, tf.int32)\n\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n '''\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n '''\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n 
left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n '''\n :param string:\n :return:\n '''\n string= re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n '''\n 整理词性表 词表 字符表 并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n '''\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower() # 当前这个单词转小写\n tag = labels[no] # 取出对应的词性\n for c in text: # 字符表\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx: # 词性表\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值\n if text not in word2idx: # 词表\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text]) # 将词转为id的标号\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])\n\n\ndef to_train_seq(*args):\n '''\n :param args: 词转为的id的序列 词性转为id的序列\n :return:\n '''\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n '''\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n '''\n x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度\n maxlen = max([j for i in x for j in i]) # 最大长度\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1-no] = char2idx[c]\n return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)]\n\n\ndef pred2label(pred):\n # 将预测结果转为标签\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\nif __name__ == '__main__':\n left_train, right_train = parse('./data/eng.train')\n left_test, right_test = parse('./data/eng.testa')\n # print(left_train[:10])\n # print(right_train[:10])\n\n word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表\n tag2idx = {'PAD': 0} # 词性表\n char2idx = {'PAD': 0}\n word_idx = 3\n tag_idx = 1\n char_idx = 1\n\n train_X, train_Y = parse_XY(left_train, right_train)\n test_X, test_Y = parse_XY(left_test, right_test)\n # print(train_X[:20])\n # print(train_Y[:20])\n\n idx2word = {idx: tag for tag, idx in word2idx.items()}\n idx2tag = {i: w for w, i in tag2idx.items()}\n\n seq_len = 50\n\n X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落\n X_char_seq = generate_char_seq(X_seq)\n print(X_seq.shape) # (203571, 50)\n print(X_char_seq.shape) # (203571, 50, 61)\n\n X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)\n X_char_seq_test = generate_char_seq(X_seq_test)\n print(X_seq_test.shape) # (51312, 50)\n print(X_char_seq_test.shape) # (51312, 50, 27)\n\n train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq\n test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test\n\n tf.reset_default_graph()\n sess = tf.Session()\n\n dim_word = 64\n dim_char = 128\n dropout = 0.8\n learning_rate = 1e-3\n hidden_size_char = 128\n hidden_size_word = 128\n num_layers = 2\n batch_size = 32\n\n model = Model(dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers)\n sess.run(tf.global_variables_initializer())\n\n for e in range(3):\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n for i in range(0, len(train_X), 
batch_size):\n\n batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]\n batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]\n batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]\n\n acc, cost, _ = sess.run(\n [model.accuracy, model.cost, model.optimizer],\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n model.labels: batch_y\n },\n )\n train_loss += cost\n train_acc += acc\n print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))\n\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]\n acc, cost = sess.run(\n [model.accuracy, model.cost],\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n model.labels: batch_y\n },\n )\n test_loss += cost\n test_acc += acc\n print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))\n\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n\n print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (e, train_loss, train_acc, test_loss, test_acc))\n\n real_Y, predict_Y = [], []\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]\n predicted = pred2label(\n sess.run(model.tags_seq,\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n },\n )\n )\n real = pred2label(batch_y)\n predict_Y.extend(predicted)\n real_Y.extend(real)\n\n print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))",
"step-ids": [
5,
8,
10,
11,
13
]
}
|
[
5,
8,
10,
11,
13
] |
#!/usr/bin/env python3
# coding: utf-8
"""
AUTHOR: bovenson
EMAIL: [email protected]
FILE: 03.py
DATE: 17-9-25 下午7:59
DESC:
"""
from socket import socket
|
normal
|
{
"blob_id": "74d1491280eba1ceb06ccf6f45546cdb41149687",
"index": 5642,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfrom socket import socket\n",
"step-3": "#!/usr/bin python3\n# coding: utf-8\n\n\"\"\"\nAUTHOR: bovenson\nEMAIL: [email protected]\nFILE: 03.py\nDATE: 17-9-25 下午7:59\nDESC:\n\"\"\"\n\nfrom socket import socket\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
import luigi
from luigi import Task, Parameter, IntParameter, BoolParameter
import pandas as pd
from pset.tasks.embeddings.load_embeding import EmbedStudentData
from pset.tasks.data.load_dataset import HashedStudentData
import numpy as npy
import pickle
import os
class NearestStudents(Task):
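    """Rank students by cosine distance between their embedding vectors.

    Loads the pickled embeddings, looks up the vector of the student whose
    ``hashed_id`` matches ``github_id``, computes the cosine distance to
    every other vector, writes the sorted distances to a CSV target, and
    prints the ``n`` nearest and the farthest entries (the ``farthest``
    flag is declared but not used in ``run``).
    """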
github_id = Parameter(default='b280302a', description='Github id to search nearby (not hashed)')
n = IntParameter(default=5, description='Output top N')
farthest = BoolParameter(default=False, description='Find farthest instead')
def output(self):
return luigi.LocalTarget("/Users/adcxdpf/Downloads/pset_03/sd.csv")
def requires(self):
return {
'data': HashedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),
'embedStudentData': EmbedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')
}
#return self.clone(EmbedStudentData)
def run(self):
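        # Overall flow: load the pickled embedding vectors, locate the target
        # student's vector via the hashed spreadsheet, compute cosine
        # distances to every vector, then write and print the sorted results.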
vectors_lookup_bytes = (self.input()['embedStudentData'].open(mode='rb'))
vectors_lookup = pickle.load(vectors_lookup_bytes)
vecs_list = pd.Series(vectors_lookup)
vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)
vectors_df.columns = ['vectors']
print('##### vectors_df : ', vectors_df)
print(" vectors_df shape is :: " , vectors_df.shape)
print("github_id param : " , self.github_id)
pd_xls_data = pd.read_excel(self.input()['data'].path,0)
idx = pd_xls_data.index[pd_xls_data['hashed_id']== self.github_id]
#print ('######## idx.values ######### ', idx.values)
my_vec = vectors_df.iloc[[idx.values[0]]]
self.my_vec = (my_vec.values[0][0])
print ("my_vec : " , self.my_vec)
print(" my_vec shape is :: " , self.my_vec.shape)
distances = vectors_df['vectors'].apply(self.my_distance)
sortedDistance= distances.sort_values()
print('###### sortedDistance : ', sortedDistance)
# output data
f = self.output().open('w')
sortedDistance.str[0].to_csv(f)
#df.to_csv(f, sep='\t', encoding='utf-8', index=None)
f.close()
nearDis= sortedDistance.head(self.n).index
print ("******** Nearest**********")
for index in nearDis:
print(pd_xls_data.iloc[index])
farDis = sortedDistance.tail(5).index
print ("******** Farthest**********")
for index in farDis:
print(pd_xls_data.iloc[index])
    def cosine_similarity(self, a, b):
        """Takes 2 vectors a, b and returns the cosine similarity according
        to the definition of the dot product.
        """
        dot_product = npy.dot(a[0], b.T)
        norm_a = npy.linalg.norm(a)
        norm_b = npy.linalg.norm(b)
        return dot_product / (norm_a * norm_b)
def my_distance(self,vec1):
return 1 - self.cosine_similarity(vec1, self.my_vec)
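
# Example invocation (sketch): the --module path below is hypothetical and
# should point at wherever this file is importable from.
#   luigi --module pset.tasks.similarity NearestStudents \
#       --github-id b280302a --n 5 --local-scheduler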
|
normal
|
{
"blob_id": "15eed401728e07bfe9299edd12add43ad8b9cb71",
"index": 3802,
"step-1": "<mask token>\n\n\nclass NearestStudents(Task):\n <mask token>\n <mask token>\n <mask token>\n\n def output(self):\n return luigi.LocalTarget('/Users/adcxdpf/Downloads/pset_03/sd.csv')\n\n def requires(self):\n return {'data': HashedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')}\n\n def run(self):\n vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(' vectors_df shape is :: ', vectors_df.shape)\n print('github_id param : ', self.github_id)\n pd_xls_data = pd.read_excel(self.input()['data'].path, 0)\n idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]\n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = my_vec.values[0][0]\n print('my_vec : ', self.my_vec)\n print(' my_vec shape is :: ', self.my_vec.shape)\n distances = vectors_df['vectors'].apply(self.my_distance)\n sortedDistance = distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n f.close()\n nearDis = sortedDistance.head(self.n).index\n print('******** Nearest**********')\n for index in nearDis:\n print(pd_xls_data.iloc[index])\n farDis = sortedDistance.tail(5).index\n print('******** Farthest**********')\n for index in farDis:\n print(pd_xls_data.iloc[index])\n\n def cosine_similarity(self, a, b):\n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n return dot_product / (norm_a * norm_b)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NearestStudents(Task):\n <mask token>\n <mask token>\n <mask token>\n\n def output(self):\n return luigi.LocalTarget('/Users/adcxdpf/Downloads/pset_03/sd.csv')\n\n def requires(self):\n return {'data': HashedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')}\n\n def run(self):\n vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(' vectors_df shape is :: ', vectors_df.shape)\n print('github_id param : ', self.github_id)\n pd_xls_data = pd.read_excel(self.input()['data'].path, 0)\n idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]\n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = my_vec.values[0][0]\n print('my_vec : ', self.my_vec)\n print(' my_vec shape is :: ', self.my_vec.shape)\n distances = vectors_df['vectors'].apply(self.my_distance)\n sortedDistance = distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n f.close()\n nearDis = sortedDistance.head(self.n).index\n print('******** Nearest**********')\n for index in nearDis:\n print(pd_xls_data.iloc[index])\n farDis = sortedDistance.tail(5).index\n print('******** Farthest**********')\n for index in farDis:\n print(pd_xls_data.iloc[index])\n\n def cosine_similarity(self, a, b):\n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n return dot_product / (norm_a * norm_b)\n\n def my_distance(self, vec1):\n return 1 - self.cosine_similarity(vec1, self.my_vec)\n",
"step-3": "<mask token>\n\n\nclass NearestStudents(Task):\n github_id = Parameter(default='b280302a', description=\n 'Github id to search nearby (not hashed)')\n n = IntParameter(default=5, description='Output top N')\n farthest = BoolParameter(default=False, description='Find farthest instead'\n )\n\n def output(self):\n return luigi.LocalTarget('/Users/adcxdpf/Downloads/pset_03/sd.csv')\n\n def requires(self):\n return {'data': HashedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')}\n\n def run(self):\n vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(' vectors_df shape is :: ', vectors_df.shape)\n print('github_id param : ', self.github_id)\n pd_xls_data = pd.read_excel(self.input()['data'].path, 0)\n idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]\n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = my_vec.values[0][0]\n print('my_vec : ', self.my_vec)\n print(' my_vec shape is :: ', self.my_vec.shape)\n distances = vectors_df['vectors'].apply(self.my_distance)\n sortedDistance = distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n f.close()\n nearDis = sortedDistance.head(self.n).index\n print('******** Nearest**********')\n for index in nearDis:\n print(pd_xls_data.iloc[index])\n farDis = sortedDistance.tail(5).index\n print('******** Farthest**********')\n for index in farDis:\n print(pd_xls_data.iloc[index])\n\n def cosine_similarity(self, a, b):\n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n return dot_product / (norm_a * norm_b)\n\n def my_distance(self, vec1):\n return 1 - self.cosine_similarity(vec1, self.my_vec)\n",
"step-4": "import luigi\nfrom luigi import *\nimport pandas as pd\nfrom pset.tasks.embeddings.load_embeding import EmbedStudentData\nfrom pset.tasks.data.load_dataset import HashedStudentData\nimport numpy as npy\nimport pickle\nimport os\n\n\nclass NearestStudents(Task):\n github_id = Parameter(default='b280302a', description=\n 'Github id to search nearby (not hashed)')\n n = IntParameter(default=5, description='Output top N')\n farthest = BoolParameter(default=False, description='Find farthest instead'\n )\n\n def output(self):\n return luigi.LocalTarget('/Users/adcxdpf/Downloads/pset_03/sd.csv')\n\n def requires(self):\n return {'data': HashedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path=\n '/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')}\n\n def run(self):\n vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(' vectors_df shape is :: ', vectors_df.shape)\n print('github_id param : ', self.github_id)\n pd_xls_data = pd.read_excel(self.input()['data'].path, 0)\n idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]\n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = my_vec.values[0][0]\n print('my_vec : ', self.my_vec)\n print(' my_vec shape is :: ', self.my_vec.shape)\n distances = vectors_df['vectors'].apply(self.my_distance)\n sortedDistance = distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n f.close()\n nearDis = sortedDistance.head(self.n).index\n print('******** Nearest**********')\n for index in nearDis:\n print(pd_xls_data.iloc[index])\n farDis = sortedDistance.tail(5).index\n print('******** Farthest**********')\n for index in farDis:\n print(pd_xls_data.iloc[index])\n\n def cosine_similarity(self, a, b):\n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n return dot_product / (norm_a * norm_b)\n\n def my_distance(self, vec1):\n return 1 - self.cosine_similarity(vec1, self.my_vec)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport luigi\nfrom luigi import *\n#from luigi import Task\nimport pandas as pd\nfrom pset.tasks.embeddings.load_embeding import EmbedStudentData\nfrom pset.tasks.data.load_dataset import HashedStudentData\nimport numpy as npy\nimport pickle\nimport os\n\nclass NearestStudents(Task):\n\n github_id = Parameter(default='b280302a', description='Github id to search nearby (not hashed)')\n n = IntParameter(default=5, description='Output top N')\n farthest = BoolParameter(default=False, description='Find farthest instead')\n \n def output(self):\n return luigi.LocalTarget(\"/Users/adcxdpf/Downloads/pset_03/sd.csv\")\n \n\n def requires(self):\n return {\n 'data': HashedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),\n 'embedStudentData': EmbedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')\n }\n #return self.clone(EmbedStudentData)\n\n\n def run(self):\n \n vectors_lookup_bytes = (self.input()['embedStudentData'].open(mode='rb'))\n vectors_lookup = pickle.load(vectors_lookup_bytes)\n\n vecs_list = pd.Series(vectors_lookup)\n vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)\n vectors_df.columns = ['vectors']\n print('##### vectors_df : ', vectors_df)\n print(\" vectors_df shape is :: \" , vectors_df.shape)\n \n print(\"github_id param : \" , self.github_id)\n \n pd_xls_data = pd.read_excel(self.input()['data'].path,0) \n idx = pd_xls_data.index[pd_xls_data['hashed_id']== self.github_id]\n #print ('######## idx.values ######### ', idx.values)\n \n my_vec = vectors_df.iloc[[idx.values[0]]]\n self.my_vec = (my_vec.values[0][0])\n \n print (\"my_vec : \" , self.my_vec)\n print(\" my_vec shape is :: \" , self.my_vec.shape)\n \n distances = vectors_df['vectors'].apply(self.my_distance)\n \n sortedDistance= distances.sort_values()\n print('###### sortedDistance : ', sortedDistance)\n \n # output data\n f = self.output().open('w')\n sortedDistance.str[0].to_csv(f)\n #df.to_csv(f, sep='\\t', encoding='utf-8', index=None)\n f.close() \n \n nearDis= sortedDistance.head(self.n).index\n print (\"******** Nearest**********\")\n for index in nearDis: \n print(pd_xls_data.iloc[index]) \n \n farDis = sortedDistance.tail(5).index\n print (\"******** Farthest**********\")\n for index in farDis: \n print(pd_xls_data.iloc[index]) \n \n\n\n def cosine_similarity(self,a, b):\n # \"\"\"Takes 2 vectors a, b and returns the cosine similarity according \n # to the definition of the dot product\n # \"\"\"\n # dot_product = npy.dot(a, b)\n # norm_a = npy.linalg.norm(a)\n # norm_b = npy.linalg.norm(b)\n # return dot_product / (norm_a * norm_b)\n \n \n dot_product = npy.dot(a[0], b.T)\n norm_a = npy.linalg.norm(a)\n norm_b = npy.linalg.norm(b)\n \n return dot_product / (norm_a * norm_b)\n \n\n def my_distance(self,vec1):\n \n return 1 - self.cosine_similarity(vec1, self.my_vec)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from django import forms
from django.contrib.auth.models import User
from ServicePad.apps.account.models import UserProfile
import hashlib, random, datetime
from ServicePad.apps.registration.models import ActivationKey
MIN_PASSWORD_LENGTH=8
MAX_PASSWORD_LENGTH=30
class UserRegistrationForm(forms.Form):
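    """Volunteer sign-up form.

    The email address doubles as the username; ``save`` creates an inactive
    ``User``, an ``ActivationKey`` that expires after one day, and a
    ``UserProfile`` with the volunteer account type.
    """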
first_name = forms.CharField(required=True,max_length=30)
last_name = forms.CharField(required=True,max_length=30)
email = forms.EmailField(required=True,max_length=30)
password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_VOLUNTEER)
def clean(self):
cleaned_data = self.cleaned_data
#Verify usernames
try:
User.objects.get(username__exact=cleaned_data.get('email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError("Email already exists")
#Verify Passwords
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError("Passwords do not match")
del cleaned_data['password']
del cleaned_data['confirm_password']
account_type = int(cleaned_data.get('form_type'))
if account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type != UserProfile.ACCOUNT_ORGANIZATION:
raise forms.ValidationError("Invalid account type")
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data['email'], self.cleaned_data['email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.is_active = False
new_user.save()
#create the activation key
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
key_obj = ActivationKey(user=new_user,activation_key=activation_key,key_expires=key_expires)
key_obj.save()
new_profile = UserProfile(user=new_user,account_type=UserProfile.ACCOUNT_VOLUNTEER)
new_profile.save()
return new_user
class OrganizationRegistrationForm(forms.Form):
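    """Organization sign-up form, keyed by the primary contact's email.

    ``save`` creates an inactive ``User`` for the primary contact and a
    ``UserProfile`` carrying the organization account type and business name.
    """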
business_name = forms.CharField(required=True,max_length=60)
primary_contact_first_name = forms.CharField(required=True,max_length=30)
primary_contact_last_name = forms.CharField(required=True,max_length=30)
primary_contact_phone = forms.CharField(required=True,max_length=30)
primary_contact_email = forms.EmailField(required=True,max_length=30)
password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_ORGANIZATION)
def clean(self):
cleaned_data = self.cleaned_data
#Verify usernames
try:
User.objects.get(username__exact=cleaned_data.get('primary_contact_email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError("Email already exists")
#Verify Passwords
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError("Passwords do not match")
del cleaned_data['password']
del cleaned_data['confirm_password']
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data['primary_contact_email'], self.cleaned_data['primary_contact_email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['primary_contact_first_name']
new_user.last_name = self.cleaned_data['primary_contact_last_name']
new_user.is_active = False
new_user.save()
        salt = str(random.random())
        hash_salt = hashlib.sha224(salt).hexdigest()
        activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]
        key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
        # Persist the activation key so the inactive account can be
        # activated later (same flow as UserRegistrationForm.save).
        key_obj = ActivationKey(user=new_user, activation_key=activation_key, key_expires=key_expires)
        key_obj.save()
new_profile = UserProfile(user=new_user,
account_type=UserProfile.ACCOUNT_ORGANIZATION,
business_name=self.cleaned_data['business_name']
)
new_profile.save()
return new_user
|
normal
|
{
"blob_id": "5f680fb21fe1090dfb58f5b9260739b91ae04d99",
"index": 9922,
"step-1": "<mask token>\n\n\nclass UserRegistrationForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-2": "<mask token>\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 
'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-3": "<mask token>\nMIN_PASSWORD_LENGTH = 8\nMAX_PASSWORD_LENGTH = 30\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 
'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-4": "from django import forms\nfrom django.contrib.auth.models import User\nfrom ServicePad.apps.account.models import UserProfile\nimport hashlib, random, datetime\nfrom ServicePad.apps.registration.models import ActivationKey\nMIN_PASSWORD_LENGTH = 8\nMAX_PASSWORD_LENGTH = 30\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise 
forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-5": "from django import forms\nfrom django.contrib.auth.models import User\nfrom ServicePad.apps.account.models import UserProfile\nimport hashlib, random, datetime\nfrom ServicePad.apps.registration.models import ActivationKey\n\nMIN_PASSWORD_LENGTH=8\nMAX_PASSWORD_LENGTH=30\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True,max_length=30)\n last_name = forms.CharField(required=True,max_length=30)\n email = forms.EmailField(required=True,max_length=30)\n password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_VOLUNTEER)\n \n def clean(self):\n cleaned_data = self.cleaned_data\n \n #Verify usernames\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError(\"Email already exists\")\n \n #Verify Passwords\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError(\"Passwords do not match\")\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n \n account_type = int(cleaned_data.get('form_type'))\n if account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type != UserProfile.ACCOUNT_ORGANIZATION:\n raise forms.ValidationError(\"Invalid account type\")\n \n \n return cleaned_data\n \n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'], self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n \n #create the activation key\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n \n key_obj = ActivationKey(user=new_user,activation_key=activation_key,key_expires=key_expires)\n key_obj.save()\n \n new_profile = UserProfile(user=new_user,account_type=UserProfile.ACCOUNT_VOLUNTEER)\n \n new_profile.save()\n \n return new_user\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True,max_length=60)\n primary_contact_first_name = forms.CharField(required=True,max_length=30)\n primary_contact_last_name = forms.CharField(required=True,max_length=30)\n primary_contact_phone = forms.CharField(required=True,max_length=30)\n primary_contact_email = forms.EmailField(required=True,max_length=30)\n password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_ORGANIZATION)\n \n def clean(self):\n cleaned_data = self.cleaned_data\n \n #Verify usernames\n try:\n User.objects.get(username__exact=cleaned_data.get('primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError(\"Email already exists\")\n \n #Verify Passwords\n password = cleaned_data.get('password')\n confirm_password = 
cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError(\"Passwords do not match\")\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n \n \n return cleaned_data\n \n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['primary_contact_email'], self.cleaned_data['primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n \n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user,\n account_type=UserProfile.ACCOUNT_ORGANIZATION,\n business_name=self.cleaned_data['business_name']\n )\n \n new_profile.save()\n \n return new_user\n\n ",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-23 19:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mocbackend', '0034_auto_20181122_1903'),
]
operations = [
migrations.AddField(
model_name='stagecollection',
name='last_in_log',
field=models.DateTimeField(blank=True, default=None, editable=False, null=True),
),
migrations.AddField(
model_name='stagesource',
name='last_in_log',
field=models.DateTimeField(blank=True, default=None, editable=False, null=True),
),
]
|
normal
|
{
"blob_id": "36bdd6f7c130914856ddf495c50f928405c345aa",
"index": 6646,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('mocbackend', '0034_auto_20181122_1903')]\n operations = [migrations.AddField(model_name='stagecollection', name=\n 'last_in_log', field=models.DateTimeField(blank=True, default=None,\n editable=False, null=True)), migrations.AddField(model_name=\n 'stagesource', name='last_in_log', field=models.DateTimeField(blank\n =True, default=None, editable=False, null=True))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('mocbackend', '0034_auto_20181122_1903')]\n operations = [migrations.AddField(model_name='stagecollection', name=\n 'last_in_log', field=models.DateTimeField(blank=True, default=None,\n editable=False, null=True)), migrations.AddField(model_name=\n 'stagesource', name='last_in_log', field=models.DateTimeField(blank\n =True, default=None, editable=False, null=True))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.16 on 2018-11-23 19:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mocbackend', '0034_auto_20181122_1903'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='stagecollection',\n name='last_in_log',\n field=models.DateTimeField(blank=True, default=None, editable=False, null=True),\n ),\n migrations.AddField(\n model_name='stagesource',\n name='last_in_log',\n field=models.DateTimeField(blank=True, default=None, editable=False, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.scala.goals.tailor import classify_source_files
from pants.backend.scala.target_types import (
ScalaJunitTestsGeneratorTarget,
ScalaSourcesGeneratorTarget,
ScalatestTestsGeneratorTarget,
)
def test_classify_source_files() -> None:
scalatest_files = {
"foo/bar/BazSpec.scala",
}
junit_files = {
"foo/bar/BazTest.scala",
}
lib_files = {"foo/bar/Baz.scala"}
assert {
ScalatestTestsGeneratorTarget: scalatest_files,
ScalaJunitTestsGeneratorTarget: junit_files,
ScalaSourcesGeneratorTarget: lib_files,
} == classify_source_files(junit_files | lib_files | scalatest_files)
|
normal
|
{
"blob_id": "42d2d8717ec2c25a99302e8de3090d600f8e80ff",
"index": 674,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_classify_source_files() ->None:\n scalatest_files = {'foo/bar/BazSpec.scala'}\n junit_files = {'foo/bar/BazTest.scala'}\n lib_files = {'foo/bar/Baz.scala'}\n assert {ScalatestTestsGeneratorTarget: scalatest_files,\n ScalaJunitTestsGeneratorTarget: junit_files,\n ScalaSourcesGeneratorTarget: lib_files} == classify_source_files(\n junit_files | lib_files | scalatest_files)\n",
"step-3": "from pants.backend.scala.goals.tailor import classify_source_files\nfrom pants.backend.scala.target_types import ScalaJunitTestsGeneratorTarget, ScalaSourcesGeneratorTarget, ScalatestTestsGeneratorTarget\n\n\ndef test_classify_source_files() ->None:\n scalatest_files = {'foo/bar/BazSpec.scala'}\n junit_files = {'foo/bar/BazTest.scala'}\n lib_files = {'foo/bar/Baz.scala'}\n assert {ScalatestTestsGeneratorTarget: scalatest_files,\n ScalaJunitTestsGeneratorTarget: junit_files,\n ScalaSourcesGeneratorTarget: lib_files} == classify_source_files(\n junit_files | lib_files | scalatest_files)\n",
"step-4": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n\nfrom pants.backend.scala.goals.tailor import classify_source_files\nfrom pants.backend.scala.target_types import (\n ScalaJunitTestsGeneratorTarget,\n ScalaSourcesGeneratorTarget,\n ScalatestTestsGeneratorTarget,\n)\n\n\ndef test_classify_source_files() -> None:\n scalatest_files = {\n \"foo/bar/BazSpec.scala\",\n }\n junit_files = {\n \"foo/bar/BazTest.scala\",\n }\n lib_files = {\"foo/bar/Baz.scala\"}\n\n assert {\n ScalatestTestsGeneratorTarget: scalatest_files,\n ScalaJunitTestsGeneratorTarget: junit_files,\n ScalaSourcesGeneratorTarget: lib_files,\n } == classify_source_files(junit_files | lib_files | scalatest_files)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def fun(st,n):
suffix=[0 for i in range(n)]
prefix=[0 for i in range(n)]
count=0
for i,val in enumerate(st):
if(val=='*'):
if(i==0):
prefix[i]=0
count+=1
else:
prefix[i]=prefix[i-1]
count+=1
else:
if(i==0):
prefix[i]=0
count+=0
else:
prefix[i]=prefix[i-1]+count
count+=0
count=0
for i in range(n-1,-1,-1):
val=st[i]
if(val=='*'):
if(i==n-1):
suffix[i]=0
count+=1
else:
suffix[i]=suffix[i+1]
count+=1
else:
if(i==n-1):
suffix[i]=0
count+=0
else:
suffix[i]=suffix[i+1]+count
count+=0
ans=10**12
for i in range(n):
if(i!=n-1):
ans=min(ans,prefix[i]+suffix[i+1])
else:
ans=min(ans,prefix[i])
print(ans)
T = int(input())
for _ in range(T):
n=int(input())
st=input()
fun(st,n)
|
normal
|
{
"blob_id": "77c7ca3391426d1e56e15a93ef3e6227a45140fc",
"index": 2829,
"step-1": "<mask token>\n",
"step-2": "def fun(st, n):\n suffix = [(0) for i in range(n)]\n prefix = [(0) for i in range(n)]\n count = 0\n for i, val in enumerate(st):\n if val == '*':\n if i == 0:\n prefix[i] = 0\n count += 1\n else:\n prefix[i] = prefix[i - 1]\n count += 1\n elif i == 0:\n prefix[i] = 0\n count += 0\n else:\n prefix[i] = prefix[i - 1] + count\n count += 0\n count = 0\n for i in range(n - 1, -1, -1):\n val = st[i]\n if val == '*':\n if i == n - 1:\n suffix[i] = 0\n count += 1\n else:\n suffix[i] = suffix[i + 1]\n count += 1\n elif i == n - 1:\n suffix[i] = 0\n count += 0\n else:\n suffix[i] = suffix[i + 1] + count\n count += 0\n ans = 10 ** 12\n for i in range(n):\n if i != n - 1:\n ans = min(ans, prefix[i] + suffix[i + 1])\n else:\n ans = min(ans, prefix[i])\n print(ans)\n\n\n<mask token>\n",
"step-3": "def fun(st, n):\n suffix = [(0) for i in range(n)]\n prefix = [(0) for i in range(n)]\n count = 0\n for i, val in enumerate(st):\n if val == '*':\n if i == 0:\n prefix[i] = 0\n count += 1\n else:\n prefix[i] = prefix[i - 1]\n count += 1\n elif i == 0:\n prefix[i] = 0\n count += 0\n else:\n prefix[i] = prefix[i - 1] + count\n count += 0\n count = 0\n for i in range(n - 1, -1, -1):\n val = st[i]\n if val == '*':\n if i == n - 1:\n suffix[i] = 0\n count += 1\n else:\n suffix[i] = suffix[i + 1]\n count += 1\n elif i == n - 1:\n suffix[i] = 0\n count += 0\n else:\n suffix[i] = suffix[i + 1] + count\n count += 0\n ans = 10 ** 12\n for i in range(n):\n if i != n - 1:\n ans = min(ans, prefix[i] + suffix[i + 1])\n else:\n ans = min(ans, prefix[i])\n print(ans)\n\n\n<mask token>\nfor _ in range(T):\n n = int(input())\n st = input()\n fun(st, n)\n",
"step-4": "def fun(st, n):\n suffix = [(0) for i in range(n)]\n prefix = [(0) for i in range(n)]\n count = 0\n for i, val in enumerate(st):\n if val == '*':\n if i == 0:\n prefix[i] = 0\n count += 1\n else:\n prefix[i] = prefix[i - 1]\n count += 1\n elif i == 0:\n prefix[i] = 0\n count += 0\n else:\n prefix[i] = prefix[i - 1] + count\n count += 0\n count = 0\n for i in range(n - 1, -1, -1):\n val = st[i]\n if val == '*':\n if i == n - 1:\n suffix[i] = 0\n count += 1\n else:\n suffix[i] = suffix[i + 1]\n count += 1\n elif i == n - 1:\n suffix[i] = 0\n count += 0\n else:\n suffix[i] = suffix[i + 1] + count\n count += 0\n ans = 10 ** 12\n for i in range(n):\n if i != n - 1:\n ans = min(ans, prefix[i] + suffix[i + 1])\n else:\n ans = min(ans, prefix[i])\n print(ans)\n\n\nT = int(input())\nfor _ in range(T):\n n = int(input())\n st = input()\n fun(st, n)\n",
"step-5": "def fun(st,n):\n suffix=[0 for i in range(n)]\n prefix=[0 for i in range(n)]\n count=0\n for i,val in enumerate(st):\n if(val=='*'):\n if(i==0):\n prefix[i]=0\n count+=1\n else:\n prefix[i]=prefix[i-1]\n count+=1\n else:\n if(i==0):\n prefix[i]=0\n count+=0\n else:\n prefix[i]=prefix[i-1]+count\n count+=0\n count=0\n for i in range(n-1,-1,-1):\n val=st[i]\n if(val=='*'):\n if(i==n-1):\n suffix[i]=0\n count+=1\n else:\n suffix[i]=suffix[i+1]\n count+=1\n else:\n if(i==n-1):\n suffix[i]=0\n count+=0\n else:\n suffix[i]=suffix[i+1]+count\n count+=0\n ans=10**12\n for i in range(n):\n if(i!=n-1):\n ans=min(ans,prefix[i]+suffix[i+1])\n else:\n ans=min(ans,prefix[i])\n print(ans)\n\nT = int(input())\nfor _ in range(T):\n n=int(input())\n st=input()\n fun(st,n)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Node:
def __init__(self,data):
self.data = data
self.next = None
def Add(Head,data):
Temp = Head
while(Temp.next != None):
Temp = Temp.next
Temp.next = Node(data)
# print(Temp.data)
def create(data):
Head = Node(data)
return Head
def printLL(Head):
Temp = Head
while(Temp != None):
# input()
print(Temp.data,end=" ")
Temp = Temp.next
print()
def Reverse(Head):
Temp = Head
TempNext = Head.next
# curr = TempNext
while(TempNext != None):
NextSaved = TempNext.next
TempNext.next = Temp
Temp = TempNext
TempNext = NextSaved
Head.next = None
Head = Temp
return Head
if __name__ == '__main__':
Head = create(5)
Add(Head,6)
Add(Head,7)
Add(Head,8)
Add(Head,9)
Add(Head,10)
printLL(Head)
NewHead = Reverse(Head)
printLL(NewHead)
|
normal
|
{
"blob_id": "ff137b51ea5b8c21e335a38a3d307a3302921245",
"index": 9993,
"step-1": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\n<mask token>\n\n\ndef Reverse(Head):\n Temp = Head\n TempNext = Head.next\n while TempNext != None:\n NextSaved = TempNext.next\n TempNext.next = Temp\n Temp = TempNext\n TempNext = NextSaved\n Head.next = None\n Head = Temp\n return Head\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\ndef Add(Head, data):\n Temp = Head\n while Temp.next != None:\n Temp = Temp.next\n Temp.next = Node(data)\n\n\ndef create(data):\n Head = Node(data)\n return Head\n\n\n<mask token>\n\n\ndef Reverse(Head):\n Temp = Head\n TempNext = Head.next\n while TempNext != None:\n NextSaved = TempNext.next\n TempNext.next = Temp\n Temp = TempNext\n TempNext = NextSaved\n Head.next = None\n Head = Temp\n return Head\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\ndef Add(Head, data):\n Temp = Head\n while Temp.next != None:\n Temp = Temp.next\n Temp.next = Node(data)\n\n\ndef create(data):\n Head = Node(data)\n return Head\n\n\ndef printLL(Head):\n Temp = Head\n while Temp != None:\n print(Temp.data, end=' ')\n Temp = Temp.next\n print()\n\n\ndef Reverse(Head):\n Temp = Head\n TempNext = Head.next\n while TempNext != None:\n NextSaved = TempNext.next\n TempNext.next = Temp\n Temp = TempNext\n TempNext = NextSaved\n Head.next = None\n Head = Temp\n return Head\n\n\n<mask token>\n",
"step-4": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\ndef Add(Head, data):\n Temp = Head\n while Temp.next != None:\n Temp = Temp.next\n Temp.next = Node(data)\n\n\ndef create(data):\n Head = Node(data)\n return Head\n\n\ndef printLL(Head):\n Temp = Head\n while Temp != None:\n print(Temp.data, end=' ')\n Temp = Temp.next\n print()\n\n\ndef Reverse(Head):\n Temp = Head\n TempNext = Head.next\n while TempNext != None:\n NextSaved = TempNext.next\n TempNext.next = Temp\n Temp = TempNext\n TempNext = NextSaved\n Head.next = None\n Head = Temp\n return Head\n\n\nif __name__ == '__main__':\n Head = create(5)\n Add(Head, 6)\n Add(Head, 7)\n Add(Head, 8)\n Add(Head, 9)\n Add(Head, 10)\n printLL(Head)\n NewHead = Reverse(Head)\n printLL(NewHead)\n",
"step-5": "\nclass Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\ndef Add(Head,data):\n Temp = Head\n while(Temp.next != None):\n Temp = Temp.next\n Temp.next = Node(data)\n # print(Temp.data)\n\ndef create(data):\n Head = Node(data)\n return Head\n\ndef printLL(Head):\n Temp = Head\n while(Temp != None):\n # input()\n print(Temp.data,end=\" \")\n Temp = Temp.next\n print()\n\ndef Reverse(Head): \n Temp = Head\n TempNext = Head.next\n # curr = TempNext\n while(TempNext != None):\n NextSaved = TempNext.next\n TempNext.next = Temp\n \n Temp = TempNext\n TempNext = NextSaved\n \n Head.next = None\n Head = Temp\n return Head\n\nif __name__ == '__main__':\n Head = create(5)\n Add(Head,6)\n Add(Head,7)\n Add(Head,8)\n Add(Head,9)\n Add(Head,10)\n printLL(Head)\n NewHead = Reverse(Head)\n printLL(NewHead)\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
#!/usr/bin/env python3
import optparse
from bs4 import BeautifulSoup
import re
import jieba
import pickle
import requests
import asyncio
if __name__ == '__main__':
    # Load the 10,000 keywords
fs = open("./src/keywords.txt", "rb")
keywords = fs.read().decode("utf-8").split(",")
fs.close()
    # Extract features from a document
def find_features(doc):
words = set(doc)
features = {}
for word in keywords:
features["contains %s" % word] = (word in words)
return features
    # Load the pre-trained NLTK classifier
fs = open('./src/my_classifier.pickle', 'rb')
classifier = pickle.load(fs)
    # Regex for matching Chinese characters
regex = re.compile("[\u4e00-\u9fa5]")
p = optparse.OptionParser(usage="usage: %prog [options] arg1 arg2", version="%prog 0.1", prog="url-tagger")
p.add_option("--url", "-u", help="Your url")
p.add_option("--file", "-f", help="Your url file. One line one url")
(options, arguments) = p.parse_args()
url_list = []
for key, value in options.__dict__.items():
if value is not None:
print("%s: %s" % (key, value))
if key is "url":
url_list.append(value)
else:
url_file = open(value, "rb+")
for line in url_file.readlines():
url_list.append(str(line, encoding="utf-8").strip())
    # Issue HTTP requests asynchronously
@asyncio.coroutine
def get_docs(url):
response = requests.get(url=url, headers={'Accept-Encoding': ''})
# print(response.apparent_encoding)
html = str(response.content, encoding=response.apparent_encoding, errors="ignore")
soup = BeautifulSoup(html, "lxml")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "".join(chunk for chunk in chunks if chunk)
# print(text)
return url, text
loop = asyncio.get_event_loop()
tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)), url_list))
data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))
loop.close()
    # Run the classifier over each document
results = [(url, classifier.classify(find_features(jieba.lcut("".join(regex.findall(data)))))) for (url, data)
in data_list]
    # Print the results
for (url, category) in results:
print("%s: %s" % (url, category))
|
normal
|
{
"blob_id": "88590aef975f7e473ef964ee0c4004cff7e24b07",
"index": 1049,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n fs = open('./src/keywords.txt', 'rb')\n keywords = fs.read().decode('utf-8').split(',')\n fs.close()\n\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features['contains %s' % word] = word in words\n return features\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n regex = re.compile('[一-龥]')\n p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',\n version='%prog 0.1', prog='url-tagger')\n p.add_option('--url', '-u', help='Your url')\n p.add_option('--file', '-f', help='Your url file. One line one url')\n options, arguments = p.parse_args()\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print('%s: %s' % (key, value))\n if key is 'url':\n url_list.append(value)\n else:\n url_file = open(value, 'rb+')\n for line in url_file.readlines():\n url_list.append(str(line, encoding='utf-8').strip())\n\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n html = str(response.content, encoding=response.apparent_encoding,\n errors='ignore')\n soup = BeautifulSoup(html, 'lxml')\n for script in soup(['script', 'style']):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split\n (' '))\n text = ''.join(chunk for chunk in chunks if chunk)\n return url, text\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),\n url_list))\n data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n results = [(url, classifier.classify(find_features(jieba.lcut(''.join(\n regex.findall(data)))))) for url, data in data_list]\n for url, category in results:\n print('%s: %s' % (url, category))\n",
"step-3": "import optparse\nfrom bs4 import BeautifulSoup\nimport re\nimport jieba\nimport pickle\nimport requests\nimport asyncio\nif __name__ == '__main__':\n fs = open('./src/keywords.txt', 'rb')\n keywords = fs.read().decode('utf-8').split(',')\n fs.close()\n\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features['contains %s' % word] = word in words\n return features\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n regex = re.compile('[一-龥]')\n p = optparse.OptionParser(usage='usage: %prog [options] arg1 arg2',\n version='%prog 0.1', prog='url-tagger')\n p.add_option('--url', '-u', help='Your url')\n p.add_option('--file', '-f', help='Your url file. One line one url')\n options, arguments = p.parse_args()\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print('%s: %s' % (key, value))\n if key is 'url':\n url_list.append(value)\n else:\n url_file = open(value, 'rb+')\n for line in url_file.readlines():\n url_list.append(str(line, encoding='utf-8').strip())\n\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n html = str(response.content, encoding=response.apparent_encoding,\n errors='ignore')\n soup = BeautifulSoup(html, 'lxml')\n for script in soup(['script', 'style']):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split\n (' '))\n text = ''.join(chunk for chunk in chunks if chunk)\n return url, text\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)),\n url_list))\n data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n results = [(url, classifier.classify(find_features(jieba.lcut(''.join(\n regex.findall(data)))))) for url, data in data_list]\n for url, category in results:\n print('%s: %s' % (url, category))\n",
"step-4": "#!/usr/bin/env python3\n\nimport optparse\nfrom bs4 import BeautifulSoup\nimport re\nimport jieba\nimport pickle\nimport requests\nimport asyncio\n\nif __name__ == '__main__':\n\n # 读取10000个关键词\n fs = open(\"./src/keywords.txt\", \"rb\")\n keywords = fs.read().decode(\"utf-8\").split(\",\")\n fs.close()\n\n # 找出特征\n def find_features(doc):\n words = set(doc)\n features = {}\n for word in keywords:\n features[\"contains %s\" % word] = (word in words)\n return features\n\n # 读取预先做好的nltk分词器\n fs = open('./src/my_classifier.pickle', 'rb')\n classifier = pickle.load(fs)\n\n # 匹配中文字符\n regex = re.compile(\"[\\u4e00-\\u9fa5]\")\n\n p = optparse.OptionParser(usage=\"usage: %prog [options] arg1 arg2\", version=\"%prog 0.1\", prog=\"url-tagger\")\n p.add_option(\"--url\", \"-u\", help=\"Your url\")\n p.add_option(\"--file\", \"-f\", help=\"Your url file. One line one url\")\n (options, arguments) = p.parse_args()\n\n url_list = []\n for key, value in options.__dict__.items():\n if value is not None:\n print(\"%s: %s\" % (key, value))\n if key is \"url\":\n url_list.append(value)\n else:\n url_file = open(value, \"rb+\")\n for line in url_file.readlines():\n url_list.append(str(line, encoding=\"utf-8\").strip())\n\n\n # 异步发起http请求\n @asyncio.coroutine\n def get_docs(url):\n response = requests.get(url=url, headers={'Accept-Encoding': ''})\n # print(response.apparent_encoding)\n html = str(response.content, encoding=response.apparent_encoding, errors=\"ignore\")\n soup = BeautifulSoup(html, \"lxml\")\n for script in soup([\"script\", \"style\"]):\n script.extract()\n text = soup.get_text()\n lines = (line.strip() for line in text.splitlines())\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n text = \"\".join(chunk for chunk in chunks if chunk)\n # print(text)\n return url, text\n\n loop = asyncio.get_event_loop()\n tasks = list(map(lambda url: asyncio.ensure_future(get_docs(url)), url_list))\n data_list = list(loop.run_until_complete(asyncio.gather(*tasks)))\n loop.close()\n\n # 分类器进行分类\n results = [(url, classifier.classify(find_features(jieba.lcut(\"\".join(regex.findall(data)))))) for (url, data)\n in data_list]\n\n # 打印结果\n for (url, category) in results:\n print(\"%s: %s\" % (url, category))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sqlite3
def connect():
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,"
"title TEXT,"
"author TEXT,"
"year INTEGER,"
"isbn INTEGER)"
)
connect.commit()
connect.close()
def insert(title,author,year,isbn):
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("INSERT INTO bookstore VALUES (NULL,?,?,?,?)",(title, author, year, isbn))
connect.commit()
connect.close()
def view():
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("SELECT * FROM bookstore")
books = cursor.fetchall()
connect.close()
return books
def search(title="", author="", year="", isbn=""):
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("SELECT * FROM bookstore WHERE title=?"
"OR author=?"
"OR year=?"
"OR isbn=?", (title,author,year,isbn))
books = cursor.fetchall()
connect.close()
return books
def delete(id):
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("DELETE FROM bookstore WHERE id=?", (id,))
connect.commit()
connect.close()
def update(id,title,author,year,isbn):
connect = sqlite3.connect("books.db")
cursor = connect.cursor()
cursor.execute("UPDATE bookstore SET title=?, author=?, year=?, isbn=?"
"WHERE id=?", (title, author, year, isbn, id))
connect.commit()
connect.close()
def close():
return True
connect()
# insert("Holy Bible", "Joseph Smith", 1823, 123456)
# print(view())
|
normal
|
{
"blob_id": "d7d23b04f6e73db6a0a8730192398941743f32ce",
"index": 6800,
"step-1": "<mask token>\n\n\ndef view():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('SELECT * FROM bookstore')\n books = cursor.fetchall()\n connect.close()\n return books\n\n\n<mask token>\n\n\ndef delete(id):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))\n connect.commit()\n connect.close()\n\n\ndef update(id, title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',\n (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\n\ndef close():\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef connect():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'\n )\n connect.commit()\n connect.close()\n\n\n<mask token>\n\n\ndef view():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('SELECT * FROM bookstore')\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef search(title='', author='', year='', isbn=''):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',\n (title, author, year, isbn))\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef delete(id):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))\n connect.commit()\n connect.close()\n\n\ndef update(id, title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',\n (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\n\ndef close():\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef connect():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'\n )\n connect.commit()\n connect.close()\n\n\ndef insert(title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('INSERT INTO bookstore VALUES (NULL,?,?,?,?)', (title,\n author, year, isbn))\n connect.commit()\n connect.close()\n\n\ndef view():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('SELECT * FROM bookstore')\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef search(title='', author='', year='', isbn=''):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',\n (title, author, year, isbn))\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef delete(id):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))\n connect.commit()\n connect.close()\n\n\ndef update(id, title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',\n (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\n\ndef close():\n return True\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef connect():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,title TEXT,author TEXT,year INTEGER,isbn INTEGER)'\n )\n connect.commit()\n connect.close()\n\n\ndef insert(title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('INSERT INTO bookstore VALUES (NULL,?,?,?,?)', (title,\n author, year, isbn))\n connect.commit()\n connect.close()\n\n\ndef view():\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('SELECT * FROM bookstore')\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef search(title='', author='', year='', isbn=''):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'SELECT * FROM bookstore WHERE title=?OR author=?OR year=?OR isbn=?',\n (title, author, year, isbn))\n books = cursor.fetchall()\n connect.close()\n return books\n\n\ndef delete(id):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute('DELETE FROM bookstore WHERE id=?', (id,))\n connect.commit()\n connect.close()\n\n\ndef update(id, title, author, year, isbn):\n connect = sqlite3.connect('books.db')\n cursor = connect.cursor()\n cursor.execute(\n 'UPDATE bookstore SET title=?, author=?, year=?, isbn=?WHERE id=?',\n (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\n\ndef close():\n return True\n\n\nconnect()\n",
"step-5": "import sqlite3\n\ndef connect():\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"CREATE TABLE IF NOT EXISTS bookstore (id INTEGER PRIMARY KEY,\"\n \"title TEXT,\"\n \"author TEXT,\"\n \"year INTEGER,\"\n \"isbn INTEGER)\"\n )\n connect.commit()\n connect.close()\n\ndef insert(title,author,year,isbn):\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"INSERT INTO bookstore VALUES (NULL,?,?,?,?)\",(title, author, year, isbn))\n connect.commit()\n connect.close()\n\ndef view():\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"SELECT * FROM bookstore\")\n books = cursor.fetchall()\n connect.close()\n return books\n\ndef search(title=\"\", author=\"\", year=\"\", isbn=\"\"):\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"SELECT * FROM bookstore WHERE title=?\"\n \"OR author=?\"\n \"OR year=?\"\n \"OR isbn=?\", (title,author,year,isbn))\n books = cursor.fetchall()\n connect.close()\n return books\n\ndef delete(id):\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"DELETE FROM bookstore WHERE id=?\", (id,))\n connect.commit()\n connect.close()\n\ndef update(id,title,author,year,isbn):\n connect = sqlite3.connect(\"books.db\")\n cursor = connect.cursor()\n cursor.execute(\"UPDATE bookstore SET title=?, author=?, year=?, isbn=?\"\n \"WHERE id=?\", (title, author, year, isbn, id))\n connect.commit()\n connect.close()\n\ndef close():\n return True\n\n\n\nconnect()\n# insert(\"Holy Bible\", \"Joseph Smith\", 1823, 123456)\n# print(view())\n\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 20:44:38 2018
@author: user
"""
import fitbit
import gather_keys_oauth2 as Oauth2
import pandas as pd
import datetime as dt
from config import CLIENT_ID, CLIENT_SECRET
#Establish connection to Fitbit API
server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
auth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
def get_heart_rate(auth2_client, date, granularity='1sec'):
"""
Query intraday time series given date
granularity: 1sec or 1min
"""
heart_rate_raw = auth2_client.intraday_time_series('activities/heart', base_date=date, detail_level=granularity)
time_list = []
val_list = []
date_list = []
for i in heart_rate_raw['activities-heart-intraday']['dataset']:
val_list.append(i['value'])
time_list.append(i['time'])
date_list.append(date)
heart_rate_df = pd.DataFrame({'Date': date_list,'Heart Rate':val_list,'Time':time_list})
heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' + heart_rate_df['Time'])
heart_rate_df = heart_rate_df[['Timestamp','Heart Rate']]
return heart_rate_df
START_DATE = '2018-01-20'
END_DATE = '2018-02-13'
DATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()
DATES = [date.strftime('%Y-%m-%d') for date in DATES]
heart_rate_dfs = []
for date in DATES:
heart_rate_dfs.append(get_heart_rate(auth2_client, date))
#Concatenate individual heart_rate_dfs for each date into one big df
heart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)
#Label each reading as 0 (not on date) or 1 (on date)
DATE_RANGES = pd.read_csv('./data/date_times.csv')
DATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])
DATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])
heart_rate_df['onDate?'] = 0
for i in range(len(DATE_RANGES)):
start = pd.to_datetime(DATE_RANGES['Start'][i])
end = pd.to_datetime(DATE_RANGES['End'][i])
mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.to_datetime(heart_rate_df['Timestamp']) <= end)
heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)
#Save to CSV
FILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'
heart_rate_df.to_csv(FILEPATH, index=False)
|
normal
|
{
"blob_id": "9f1cbc655a5d8f14fa45cf977bb2dcee4874b188",
"index": 5809,
"step-1": "<mask token>\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n heart_rate_raw = auth2_client.intraday_time_series('activities/heart',\n base_date=date, detail_level=granularity)\n time_list = []\n val_list = []\n date_list = []\n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,\n 'Time': time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +\n heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]\n return heart_rate_df\n\n\n<mask token>\n",
"step-2": "<mask token>\nserver.browser_authorize()\n<mask token>\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n heart_rate_raw = auth2_client.intraday_time_series('activities/heart',\n base_date=date, detail_level=granularity)\n time_list = []\n val_list = []\n date_list = []\n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,\n 'Time': time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +\n heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]\n return heart_rate_df\n\n\n<mask token>\nfor date in DATES:\n heart_rate_dfs.append(get_heart_rate(auth2_client, date))\n<mask token>\nfor i in range(len(DATE_RANGES)):\n start = pd.to_datetime(DATE_RANGES['Start'][i])\n end = pd.to_datetime(DATE_RANGES['End'][i])\n mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.\n to_datetime(heart_rate_df['Timestamp']) <= end)\n heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)\n<mask token>\nheart_rate_df.to_csv(FILEPATH, index=False)\n",
"step-3": "<mask token>\nserver = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)\nserver.browser_authorize()\nACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])\nREFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])\nauth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True,\n access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n heart_rate_raw = auth2_client.intraday_time_series('activities/heart',\n base_date=date, detail_level=granularity)\n time_list = []\n val_list = []\n date_list = []\n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,\n 'Time': time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +\n heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]\n return heart_rate_df\n\n\nSTART_DATE = '2018-01-20'\nEND_DATE = '2018-02-13'\nDATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()\nDATES = [date.strftime('%Y-%m-%d') for date in DATES]\nheart_rate_dfs = []\nfor date in DATES:\n heart_rate_dfs.append(get_heart_rate(auth2_client, date))\nheart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)\nDATE_RANGES = pd.read_csv('./data/date_times.csv')\nDATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])\nDATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])\nheart_rate_df['onDate?'] = 0\nfor i in range(len(DATE_RANGES)):\n start = pd.to_datetime(DATE_RANGES['Start'][i])\n end = pd.to_datetime(DATE_RANGES['End'][i])\n mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.\n to_datetime(heart_rate_df['Timestamp']) <= end)\n heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)\nFILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'\nheart_rate_df.to_csv(FILEPATH, index=False)\n",
"step-4": "<mask token>\nimport fitbit\nimport gather_keys_oauth2 as Oauth2\nimport pandas as pd\nimport datetime as dt\nfrom config import CLIENT_ID, CLIENT_SECRET\nserver = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)\nserver.browser_authorize()\nACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])\nREFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])\nauth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True,\n access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n heart_rate_raw = auth2_client.intraday_time_series('activities/heart',\n base_date=date, detail_level=granularity)\n time_list = []\n val_list = []\n date_list = []\n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,\n 'Time': time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +\n heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]\n return heart_rate_df\n\n\nSTART_DATE = '2018-01-20'\nEND_DATE = '2018-02-13'\nDATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()\nDATES = [date.strftime('%Y-%m-%d') for date in DATES]\nheart_rate_dfs = []\nfor date in DATES:\n heart_rate_dfs.append(get_heart_rate(auth2_client, date))\nheart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)\nDATE_RANGES = pd.read_csv('./data/date_times.csv')\nDATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])\nDATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])\nheart_rate_df['onDate?'] = 0\nfor i in range(len(DATE_RANGES)):\n start = pd.to_datetime(DATE_RANGES['Start'][i])\n end = pd.to_datetime(DATE_RANGES['End'][i])\n mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.\n to_datetime(heart_rate_df['Timestamp']) <= end)\n heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)\nFILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'\nheart_rate_df.to_csv(FILEPATH, index=False)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 23 20:44:38 2018\n\n@author: user\n\"\"\"\n\nimport fitbit\nimport gather_keys_oauth2 as Oauth2\nimport pandas as pd \nimport datetime as dt\nfrom config import CLIENT_ID, CLIENT_SECRET\n\n\n#Establish connection to Fitbit API\nserver = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)\nserver.browser_authorize()\n\nACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])\nREFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])\n\nauth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n \n heart_rate_raw = auth2_client.intraday_time_series('activities/heart', base_date=date, detail_level=granularity)\n\n time_list = []\n val_list = []\n date_list = []\n \n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n \n heart_rate_df = pd.DataFrame({'Date': date_list,'Heart Rate':val_list,'Time':time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' + heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp','Heart Rate']]\n \n return heart_rate_df\n\n\nSTART_DATE = '2018-01-20'\nEND_DATE = '2018-02-13' \nDATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()\nDATES = [date.strftime('%Y-%m-%d') for date in DATES]\n \nheart_rate_dfs = []\nfor date in DATES:\n heart_rate_dfs.append(get_heart_rate(auth2_client, date))\n\n#Concatenate individual heart_rate_dfs for each date into one big df\nheart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)\n\n#Label each reading as 0 (not on date) or 1 (on date)\nDATE_RANGES = pd.read_csv('./data/date_times.csv')\nDATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])\nDATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])\n\nheart_rate_df['onDate?'] = 0\nfor i in range(len(DATE_RANGES)):\n start = pd.to_datetime(DATE_RANGES['Start'][i])\n end = pd.to_datetime(DATE_RANGES['End'][i])\n \n mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.to_datetime(heart_rate_df['Timestamp']) <= end)\n heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)\n\n#Save to CSV\nFILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'\nheart_rate_df.to_csv(FILEPATH, index=False)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
import click
@click.command()
@click.option("--name", prompt = "Your name")
def hello(name):
print("hello", name)
if __name__ == '__main__':
hello()
|
normal
|
{
"blob_id": "19c1a50cf19f04a9e0d0163a9383cb900bca1d38",
"index": 9862,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]()\[email protected]('--name', prompt='Your name')\ndef hello(name):\n print('hello', name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]()\[email protected]('--name', prompt='Your name')\ndef hello(name):\n print('hello', name)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-4": "import click\n\n\[email protected]()\[email protected]('--name', prompt='Your name')\ndef hello(name):\n print('hello', name)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-5": "#!/usr/bin/env python3\n\nimport click\n\[email protected]()\[email protected](\"--name\", prompt = \"Your name\")\ndef hello(name):\n print(\"hello\", name)\n\nif __name__ == '__main__':\n hello()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from tqdm import tqdm
from system.krl import KRL
from system.utils.format import format_data
from system.oie import OIE
# extract one file
def execute_file(input_fp, output_fp):
oie = OIE()
oie.extract_file(input_fp, output_fp)
# extract one sentence
def execute_sentence():
oie = OIE()
# test one data
line = {"text": "印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校"}
line = {"text": "中美两国的人民反对大规模的杀伤性的武器"}
line = {"id": "6",
"sysId": "eb88374b30fda925b399e787a927327c",
"text": "乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。",
"event_list": [
{"event_type": "举办类", "trigger": "举行", "trigger_start_index": "38", "trigger_end_index": "40",
"trigger_entity_type": "NONE", "arguments": [
{"role": "会议", "argument": "抗议集会", "argument_start_index": "40", "argument_end_index": "44",
"argument_entity_type": "Meeting"},
{"role": "地点", "argument": "普天间基地", "argument_start_index": "31", "argument_end_index": "36",
"argument_entity_type": "ZBGC"},
{"role": "时间", "argument": "13日", "argument_start_index": "0", "argument_end_index": "3",
"argument_entity_type": "Time"},
{"role": "主体", "argument": "冲绳和平运动中心", "argument_start_index": "4", "argument_end_index": "12",
"argument_entity_type": "Org"}]}]}
sample = line['text']
result, quick_look = oie.extract(sample, True, True, True)
print(quick_look)
# s += len(result)
# opobj.write(str(result) + "\n")
# opobj2.write(str(quick_look) + "\n")
# print(s)
# opobj.close()
# opobj2.close()
def clean_triples(train_fp, output_fp, is_train: bool):
krl = KRL()
model_type = 'TransE'
if is_train:
model_type = 'TransE'
krl.train(train_fp, model_type=model_type, dev_path=train_fp, save_path='./krl_{}_saves'.format(model_type))
else:
krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=model_type)
if __name__ == "__main__":
# 1 extract the triples
# eg:{"id": "870", "sysId": "3669195fb557cea411d166d353cc194d",
# "text": "目前,黎以临时边界“蓝线”沿线,特别是靠近叙利亚戈兰高地的地段局势紧张,黎以军队和联合国驻黎巴嫩南部临时部队(联黎部队)都处于高度戒备状态,以应对以色列空袭叙利亚可能引发的军事冲突。",
# "event_list": [{"event_type": "军事冲突类", "trigger": "空袭", "trigger_start_index": "76", "trigger_end_index": "78", "trigger_entity_type": "$element$", "arguments": [{"role": "主体", "argument": "以色列", "argument_start_index": "73", "argument_end_index": "76", "argument_entity_type": "Country"}, {"role": "目标", "argument": "叙利亚", "argument_start_index": "78", "argument_end_index": "81", "argument_entity_type": "Country"}]}]}
# -> [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']]
input_file_path = 'data/all_data.json'
triples_file_path = 'result/1_after_extract.txt'
# execute_file(input_file_path, triples_file_path)
# 2 clean the triples
# transform the data format
# [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']] ->
# 南部临时部队(联黎部队), 处于, 高度戒备状态
# 以色列, 空袭, 叙利亚
formatted_fp = 'result/1_after_extract_formatted.txt'
format_data(triples_file_path, formatted_fp)
# using Knowledge Relation Learning (KRL) to score the triples
cleared_file_path = 'result/2_cleared_extract.txt'
clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path, is_train=True)
|
normal
|
{
"blob_id": "bc5e928305d82c92c10106fe1f69f5979d57e3d2",
"index": 5446,
"step-1": "<mask token>\n\n\ndef execute_file(input_fp, output_fp):\n oie = OIE()\n oie.extract_file(input_fp, output_fp)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef execute_file(input_fp, output_fp):\n oie = OIE()\n oie.extract_file(input_fp, output_fp)\n\n\ndef execute_sentence():\n oie = OIE()\n line = {'text': '印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校'}\n line = {'text': '中美两国的人民反对大规模的杀伤性的武器'}\n line = {'id': '6', 'sysId': 'eb88374b30fda925b399e787a927327c', 'text':\n '乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。', 'event_list': [{\n 'event_type': '举办类', 'trigger': '举行', 'trigger_start_index': '38',\n 'trigger_end_index': '40', 'trigger_entity_type': 'NONE',\n 'arguments': [{'role': '会议', 'argument': '抗议集会',\n 'argument_start_index': '40', 'argument_end_index': '44',\n 'argument_entity_type': 'Meeting'}, {'role': '地点', 'argument':\n '普天间基地', 'argument_start_index': '31', 'argument_end_index': '36',\n 'argument_entity_type': 'ZBGC'}, {'role': '时间', 'argument': '13日',\n 'argument_start_index': '0', 'argument_end_index': '3',\n 'argument_entity_type': 'Time'}, {'role': '主体', 'argument':\n '冲绳和平运动中心', 'argument_start_index': '4', 'argument_end_index': '12',\n 'argument_entity_type': 'Org'}]}]}\n sample = line['text']\n result, quick_look = oie.extract(sample, True, True, True)\n print(quick_look)\n\n\ndef clean_triples(train_fp, output_fp, is_train: bool):\n krl = KRL()\n model_type = 'TransE'\n if is_train:\n model_type = 'TransE'\n krl.train(train_fp, model_type=model_type, dev_path=train_fp,\n save_path='./krl_{}_saves'.format(model_type))\n else:\n krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=\n model_type)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef execute_file(input_fp, output_fp):\n oie = OIE()\n oie.extract_file(input_fp, output_fp)\n\n\ndef execute_sentence():\n oie = OIE()\n line = {'text': '印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校'}\n line = {'text': '中美两国的人民反对大规模的杀伤性的武器'}\n line = {'id': '6', 'sysId': 'eb88374b30fda925b399e787a927327c', 'text':\n '乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。', 'event_list': [{\n 'event_type': '举办类', 'trigger': '举行', 'trigger_start_index': '38',\n 'trigger_end_index': '40', 'trigger_entity_type': 'NONE',\n 'arguments': [{'role': '会议', 'argument': '抗议集会',\n 'argument_start_index': '40', 'argument_end_index': '44',\n 'argument_entity_type': 'Meeting'}, {'role': '地点', 'argument':\n '普天间基地', 'argument_start_index': '31', 'argument_end_index': '36',\n 'argument_entity_type': 'ZBGC'}, {'role': '时间', 'argument': '13日',\n 'argument_start_index': '0', 'argument_end_index': '3',\n 'argument_entity_type': 'Time'}, {'role': '主体', 'argument':\n '冲绳和平运动中心', 'argument_start_index': '4', 'argument_end_index': '12',\n 'argument_entity_type': 'Org'}]}]}\n sample = line['text']\n result, quick_look = oie.extract(sample, True, True, True)\n print(quick_look)\n\n\ndef clean_triples(train_fp, output_fp, is_train: bool):\n krl = KRL()\n model_type = 'TransE'\n if is_train:\n model_type = 'TransE'\n krl.train(train_fp, model_type=model_type, dev_path=train_fp,\n save_path='./krl_{}_saves'.format(model_type))\n else:\n krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=\n model_type)\n\n\nif __name__ == '__main__':\n input_file_path = 'data/all_data.json'\n triples_file_path = 'result/1_after_extract.txt'\n formatted_fp = 'result/1_after_extract_formatted.txt'\n format_data(triples_file_path, formatted_fp)\n cleared_file_path = 'result/2_cleared_extract.txt'\n clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path,\n is_train=True)\n",
"step-4": "import os\nfrom tqdm import tqdm\nfrom system.krl import KRL\nfrom system.utils.format import format_data\nfrom system.oie import OIE\n\n\ndef execute_file(input_fp, output_fp):\n oie = OIE()\n oie.extract_file(input_fp, output_fp)\n\n\ndef execute_sentence():\n oie = OIE()\n line = {'text': '印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校'}\n line = {'text': '中美两国的人民反对大规模的杀伤性的武器'}\n line = {'id': '6', 'sysId': 'eb88374b30fda925b399e787a927327c', 'text':\n '乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。', 'event_list': [{\n 'event_type': '举办类', 'trigger': '举行', 'trigger_start_index': '38',\n 'trigger_end_index': '40', 'trigger_entity_type': 'NONE',\n 'arguments': [{'role': '会议', 'argument': '抗议集会',\n 'argument_start_index': '40', 'argument_end_index': '44',\n 'argument_entity_type': 'Meeting'}, {'role': '地点', 'argument':\n '普天间基地', 'argument_start_index': '31', 'argument_end_index': '36',\n 'argument_entity_type': 'ZBGC'}, {'role': '时间', 'argument': '13日',\n 'argument_start_index': '0', 'argument_end_index': '3',\n 'argument_entity_type': 'Time'}, {'role': '主体', 'argument':\n '冲绳和平运动中心', 'argument_start_index': '4', 'argument_end_index': '12',\n 'argument_entity_type': 'Org'}]}]}\n sample = line['text']\n result, quick_look = oie.extract(sample, True, True, True)\n print(quick_look)\n\n\ndef clean_triples(train_fp, output_fp, is_train: bool):\n krl = KRL()\n model_type = 'TransE'\n if is_train:\n model_type = 'TransE'\n krl.train(train_fp, model_type=model_type, dev_path=train_fp,\n save_path='./krl_{}_saves'.format(model_type))\n else:\n krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=\n model_type)\n\n\nif __name__ == '__main__':\n input_file_path = 'data/all_data.json'\n triples_file_path = 'result/1_after_extract.txt'\n formatted_fp = 'result/1_after_extract_formatted.txt'\n format_data(triples_file_path, formatted_fp)\n cleared_file_path = 'result/2_cleared_extract.txt'\n clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path,\n is_train=True)\n",
"step-5": "import os\r\n\r\nfrom tqdm import tqdm\r\n\r\nfrom system.krl import KRL\r\nfrom system.utils.format import format_data\r\nfrom system.oie import OIE\r\n\r\n\r\n# extract one file\r\ndef execute_file(input_fp, output_fp):\r\n oie = OIE()\r\n oie.extract_file(input_fp, output_fp)\r\n\r\n\r\n# extract one sentence\r\ndef execute_sentence():\r\n oie = OIE()\r\n # test one data\r\n line = {\"text\": \"印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校\"}\r\n line = {\"text\": \"中美两国的人民反对大规模的杀伤性的武器\"}\r\n line = {\"id\": \"6\",\r\n \"sysId\": \"eb88374b30fda925b399e787a927327c\",\r\n \"text\": \"乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。\",\r\n \"event_list\": [\r\n {\"event_type\": \"举办类\", \"trigger\": \"举行\", \"trigger_start_index\": \"38\", \"trigger_end_index\": \"40\",\r\n \"trigger_entity_type\": \"NONE\", \"arguments\": [\r\n {\"role\": \"会议\", \"argument\": \"抗议集会\", \"argument_start_index\": \"40\", \"argument_end_index\": \"44\",\r\n \"argument_entity_type\": \"Meeting\"},\r\n {\"role\": \"地点\", \"argument\": \"普天间基地\", \"argument_start_index\": \"31\", \"argument_end_index\": \"36\",\r\n \"argument_entity_type\": \"ZBGC\"},\r\n {\"role\": \"时间\", \"argument\": \"13日\", \"argument_start_index\": \"0\", \"argument_end_index\": \"3\",\r\n \"argument_entity_type\": \"Time\"},\r\n {\"role\": \"主体\", \"argument\": \"冲绳和平运动中心\", \"argument_start_index\": \"4\", \"argument_end_index\": \"12\",\r\n \"argument_entity_type\": \"Org\"}]}]}\r\n\r\n sample = line['text']\r\n result, quick_look = oie.extract(sample, True, True, True)\r\n print(quick_look)\r\n # s += len(result)\r\n # opobj.write(str(result) + \"\\n\")\r\n # opobj2.write(str(quick_look) + \"\\n\")\r\n # print(s)\r\n # opobj.close()\r\n # opobj2.close()\r\n\r\n\r\ndef clean_triples(train_fp, output_fp, is_train: bool):\r\n krl = KRL()\r\n model_type = 'TransE'\r\n\r\n if is_train:\r\n model_type = 'TransE'\r\n krl.train(train_fp, model_type=model_type, dev_path=train_fp, save_path='./krl_{}_saves'.format(model_type))\r\n else:\r\n krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=model_type)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # 1 extract the triples\r\n # eg:{\"id\": \"870\", \"sysId\": \"3669195fb557cea411d166d353cc194d\",\r\n # \"text\": \"目前,黎以临时边界“蓝线”沿线,特别是靠近叙利亚戈兰高地的地段局势紧张,黎以军队和联合国驻黎巴嫩南部临时部队(联黎部队)都处于高度戒备状态,以应对以色列空袭叙利亚可能引发的军事冲突。\",\r\n # \"event_list\": [{\"event_type\": \"军事冲突类\", \"trigger\": \"空袭\", \"trigger_start_index\": \"76\", \"trigger_end_index\": \"78\", \"trigger_entity_type\": \"$element$\", \"arguments\": [{\"role\": \"主体\", \"argument\": \"以色列\", \"argument_start_index\": \"73\", \"argument_end_index\": \"76\", \"argument_entity_type\": \"Country\"}, {\"role\": \"目标\", \"argument\": \"叙利亚\", \"argument_start_index\": \"78\", \"argument_end_index\": \"81\", \"argument_entity_type\": \"Country\"}]}]}\r\n # -> [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']]\r\n\r\n input_file_path = 'data/all_data.json'\r\n triples_file_path = 'result/1_after_extract.txt'\r\n # execute_file(input_file_path, triples_file_path)\r\n\r\n # 2 clean the triples\r\n # transform the data format\r\n # [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']] ->\r\n # 南部临时部队(联黎部队), 处于, 高度戒备状态\r\n # 以色列, 空袭, 叙利亚\r\n formatted_fp = 'result/1_after_extract_formatted.txt'\r\n format_data(triples_file_path, formatted_fp)\r\n\r\n # using Knowledge Relation Learning (KRL) to score the triples\r\n cleared_file_path = 'result/2_cleared_extract.txt'\r\n clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path, 
is_train=True)\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
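A plausible sketch of the formatting step described in the comments above: each stored line holds a Python-style list of [subject, relation, object] triples, and the formatting pass flattens it to one comma-separated triple per line for KRL training. The real implementation lives in system/utils/format.py and is not shown, so the parsing via ast.literal_eval and the name format_data_sketch are assumptions.

import ast

def format_data_sketch(triples_fp, output_fp):
    # input line:  "[['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']]"
    # output line: "南部临时部队(联黎部队), 处于, 高度戒备状态"
    with open(triples_fp, encoding='utf-8') as fin, \
         open(output_fp, 'w', encoding='utf-8') as fout:
        for line in fin:
            line = line.strip()
            if not line:
                continue
            for subj, rel, obj in ast.literal_eval(line):
                fout.write('{}, {}, {}\n'.format(subj, rel, obj))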
#! /usr/bin/env python
import os
import re
from codecs import open
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
def get_changelog():
with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:
text = f.read()
header_matches = list(re.finditer('^=+$', text, re.MULTILINE))
text = text[:header_matches[5].start()] # until fifth header
lines = text.splitlines()[:-1] # all lines without fifth release number
return '=========\nChangelog\n=========\n\n' + '\n'.join(lines)
about = {}
with open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
with open('README.rst', encoding='utf-8') as f:
README = f.read()
CHANGELOG = get_changelog()
requires = [
'py>=1.4.23',
'appdirs',
'devpi_common<4,>=3.3.0',
'itsdangerous>=0.24',
'execnet>=1.2',
'pyramid>=1.8',
'waitress>=1.0.1',
'repoze.lru>=0.6',
'passlib[argon2]',
'pluggy>=0.3.0,<1.0',
'strictyaml',
]
extras_require = {}
setup(
name=about['__title__'],
description=about['__description__'],
keywords='pypi realtime cache server',
long_description="\n\n".join([README, CHANGELOG]),
url=about['__url__'],
version=about['__version__'],
maintainer=about['__maintainer__'],
maintainer_email=about['__maintainer_email__'],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
license=about['__license__'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=requires,
extras_require=extras_require,
entry_points={
'console_scripts': [
'devpi-server = devpi_server.main:main' ],
'devpi_server': [
'devpi-server-auth-basic = devpi_server.auth_basic',
'devpi-server-auth-devpi = devpi_server.auth_devpi',
'devpi-server-sqlite = devpi_server.keyfs_sqlite',
'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs' ],
'devpi_web': [
'devpi-server-status = devpi_server.views'],
'pytest11': [
'pytest_devpi_server = pytest_devpi_server' ],
},
)
|
normal
|
{
"blob_id": "c81889cf4d87933b562aa4618bc5185a8d213107",
"index": 8075,
"step-1": "<mask token>\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()]\n lines = text.splitlines()[:-1]\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()]\n lines = text.splitlines()[:-1]\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\n\n<mask token>\nwith open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8'\n ) as f:\n exec(f.read(), about)\nwith open('README.rst', encoding='utf-8') as f:\n README = f.read()\n<mask token>\nsetup(name=about['__title__'], description=about['__description__'],\n keywords='pypi realtime cache server', long_description='\\n\\n'.join([\n README, CHANGELOG]), url=about['__url__'], version=about['__version__'],\n maintainer=about['__maintainer__'], maintainer_email=about[\n '__maintainer_email__'], packages=find_packages(), include_package_data\n =True, zip_safe=False, license=about['__license__'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], install_requires=requires,\n extras_require=extras_require, entry_points={'console_scripts': [\n 'devpi-server = devpi_server.main:main'], 'devpi_server': [\n 'devpi-server-auth-basic = devpi_server.auth_basic',\n 'devpi-server-auth-devpi = devpi_server.auth_devpi',\n 'devpi-server-sqlite = devpi_server.keyfs_sqlite',\n 'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs'], 'devpi_web':\n ['devpi-server-status = devpi_server.views'], 'pytest11': [\n 'pytest_devpi_server = pytest_devpi_server']})\n",
"step-3": "<mask token>\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()]\n lines = text.splitlines()[:-1]\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\n\nabout = {}\nwith open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8'\n ) as f:\n exec(f.read(), about)\nwith open('README.rst', encoding='utf-8') as f:\n README = f.read()\nCHANGELOG = get_changelog()\nrequires = ['py>=1.4.23', 'appdirs', 'devpi_common<4,>=3.3.0',\n 'itsdangerous>=0.24', 'execnet>=1.2', 'pyramid>=1.8', 'waitress>=1.0.1',\n 'repoze.lru>=0.6', 'passlib[argon2]', 'pluggy>=0.3.0,<1.0', 'strictyaml']\nextras_require = {}\nsetup(name=about['__title__'], description=about['__description__'],\n keywords='pypi realtime cache server', long_description='\\n\\n'.join([\n README, CHANGELOG]), url=about['__url__'], version=about['__version__'],\n maintainer=about['__maintainer__'], maintainer_email=about[\n '__maintainer_email__'], packages=find_packages(), include_package_data\n =True, zip_safe=False, license=about['__license__'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], install_requires=requires,\n extras_require=extras_require, entry_points={'console_scripts': [\n 'devpi-server = devpi_server.main:main'], 'devpi_server': [\n 'devpi-server-auth-basic = devpi_server.auth_basic',\n 'devpi-server-auth-devpi = devpi_server.auth_devpi',\n 'devpi-server-sqlite = devpi_server.keyfs_sqlite',\n 'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs'], 'devpi_web':\n ['devpi-server-status = devpi_server.views'], 'pytest11': [\n 'pytest_devpi_server = pytest_devpi_server']})\n",
"step-4": "import os\nimport re\nfrom codecs import open\nfrom setuptools import find_packages, setup\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()]\n lines = text.splitlines()[:-1]\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\n\nabout = {}\nwith open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8'\n ) as f:\n exec(f.read(), about)\nwith open('README.rst', encoding='utf-8') as f:\n README = f.read()\nCHANGELOG = get_changelog()\nrequires = ['py>=1.4.23', 'appdirs', 'devpi_common<4,>=3.3.0',\n 'itsdangerous>=0.24', 'execnet>=1.2', 'pyramid>=1.8', 'waitress>=1.0.1',\n 'repoze.lru>=0.6', 'passlib[argon2]', 'pluggy>=0.3.0,<1.0', 'strictyaml']\nextras_require = {}\nsetup(name=about['__title__'], description=about['__description__'],\n keywords='pypi realtime cache server', long_description='\\n\\n'.join([\n README, CHANGELOG]), url=about['__url__'], version=about['__version__'],\n maintainer=about['__maintainer__'], maintainer_email=about[\n '__maintainer_email__'], packages=find_packages(), include_package_data\n =True, zip_safe=False, license=about['__license__'], classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], install_requires=requires,\n extras_require=extras_require, entry_points={'console_scripts': [\n 'devpi-server = devpi_server.main:main'], 'devpi_server': [\n 'devpi-server-auth-basic = devpi_server.auth_basic',\n 'devpi-server-auth-devpi = devpi_server.auth_devpi',\n 'devpi-server-sqlite = devpi_server.keyfs_sqlite',\n 'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs'], 'devpi_web':\n ['devpi-server-status = devpi_server.views'], 'pytest11': [\n 'pytest_devpi_server = pytest_devpi_server']})\n",
"step-5": "#! /usr/bin/env python\n\nimport os\nimport re\n\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_changelog():\n with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:\n text = f.read()\n header_matches = list(re.finditer('^=+$', text, re.MULTILINE))\n text = text[:header_matches[5].start()] # until fifth header\n lines = text.splitlines()[:-1] # all lines without fifth release number\n return '=========\\nChangelog\\n=========\\n\\n' + '\\n'.join(lines)\n\nabout = {}\n\nwith open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8') as f:\n exec(f.read(), about)\n\nwith open('README.rst', encoding='utf-8') as f:\n README = f.read()\n\nCHANGELOG = get_changelog()\n\nrequires = [\n 'py>=1.4.23',\n 'appdirs',\n 'devpi_common<4,>=3.3.0',\n 'itsdangerous>=0.24',\n 'execnet>=1.2',\n 'pyramid>=1.8',\n 'waitress>=1.0.1',\n 'repoze.lru>=0.6',\n 'passlib[argon2]',\n 'pluggy>=0.3.0,<1.0',\n 'strictyaml',\n ]\nextras_require = {}\n\nsetup(\n name=about['__title__'],\n description=about['__description__'],\n keywords='pypi realtime cache server',\n long_description=\"\\n\\n\".join([README, CHANGELOG]),\n url=about['__url__'],\n version=about['__version__'],\n maintainer=about['__maintainer__'],\n maintainer_email=about['__maintainer_email__'],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n license=about['__license__'],\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n install_requires=requires,\n extras_require=extras_require,\n entry_points={\n 'console_scripts': [\n 'devpi-server = devpi_server.main:main' ],\n 'devpi_server': [\n 'devpi-server-auth-basic = devpi_server.auth_basic',\n 'devpi-server-auth-devpi = devpi_server.auth_devpi',\n 'devpi-server-sqlite = devpi_server.keyfs_sqlite',\n 'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs' ],\n 'devpi_web': [\n 'devpi-server-status = devpi_server.views'],\n 'pytest11': [\n 'pytest_devpi_server = pytest_devpi_server' ],\n },\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
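A toy illustration (not from the devpi repository) of how get_changelog() trims the CHANGELOG: it slices the text just before the sixth '=====' underline, keeping the five most recent releases, and splitlines()[:-1] drops the dangling version line of the sixth release. The synthetic changelog text below is an assumption made only for the demo.

import re

text = "\n".join("1.0.{}\n=====\n- change {}".format(i, i) for i in range(9, 0, -1))
header_matches = list(re.finditer('^=+$', text, re.MULTILINE))
trimmed = text[:header_matches[5].start()]   # slice before the sixth '=====' match
lines = trimmed.splitlines()[:-1]            # drop the sixth release's version line
print('\n'.join(lines))                      # releases 1.0.9 .. 1.0.5 remain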
import torch
import torch.nn.functional as f
import time
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
def plot_grad_flow(named_parameters):
'''Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
    Usage: Plug this function in Trainer class after loss.backward() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if (p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
plt.show()
learning_rate = 1e-6
y_pred = model(x)
loss = (y_pred - y).pow(2).sum()
loss.backward()
plot_grad_flow(model.named_parameters())
|
normal
|
{
"blob_id": "0fb424dafaac184882ea56f36265e0b19b5a4c50",
"index": 9758,
"step-1": "<mask token>\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\n<mask token>\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-3": "<mask token>\ndtype = torch.float\ndevice = torch.device('cpu')\nN, D_in, H, D_out = 64, 1000, 100, 10\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\nmodel = torch.nn.Sequential(torch.nn.Linear(D_in, H), torch.nn.ReLU(),\n torch.nn.Linear(H, D_out))\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\nlearning_rate = 1e-06\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-4": "import torch\nimport torch.nn.functional as f\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\ndtype = torch.float\ndevice = torch.device('cpu')\nN, D_in, H, D_out = 64, 1000, 100, 10\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\nmodel = torch.nn.Sequential(torch.nn.Linear(D_in, H), torch.nn.ReLU(),\n torch.nn.Linear(H, D_out))\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\nlearning_rate = 1e-06\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-5": "\nimport torch\nimport torch.nn.functional as f\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # Uncomment this to run on GPU\n\nN, D_in, H, D_out = 64, 1000, 100, 10\n\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\nmodel = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n)\n\n\ndef plot_grad_flow(named_parameters):\n '''Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow'''\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if (p.requires_grad) and (\"bias\" not in n):\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color=\"c\")\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color=\"b\")\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color=\"k\")\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation=\"vertical\")\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions\n plt.xlabel(\"Layers\")\n plt.ylabel(\"average gradient\")\n plt.title(\"Gradient flow\")\n plt.grid(True)\n plt.legend([Line2D([0], [0], color=\"c\", lw=4),\n Line2D([0], [0], color=\"b\", lw=4),\n Line2D([0], [0], color=\"k\", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])\n\n plt.show()\n\nlearning_rate = 1e-6\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
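A minimal training-loop sketch of the usage the docstring describes, assuming plot_grad_flow is in scope as defined in the script above; the model size, optimizer, and inspection interval are arbitrary choices for illustration.

import torch

model = torch.nn.Sequential(torch.nn.Linear(1000, 100), torch.nn.ReLU(),
                            torch.nn.Linear(100, 10))
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
x, y = torch.randn(64, 1000), torch.randn(64, 10)

for step in range(100):
    optimizer.zero_grad()
    loss = (model(x) - y).pow(2).sum()
    loss.backward()
    if step % 25 == 0:
        plot_grad_flow(model.named_parameters())  # inspect gradients right after backward()
    optimizer.step()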
from django.contrib import admin
from django.db import models
from tinymce.widgets import TinyMCE
from .models import UserInfo
# Register your models here.
class UserInfoAdmin(admin.ModelAdmin):
list_display=[
'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
search_fields=[
'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
list_display_links=[
'user_name',
# 'user_profession',
# 'user_phone',
# 'user_email',
# 'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
list_editable = [
# 'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
# 'facebook_link',
# 'instagram_link',
# 'telegram_link',
# 'whatsup_link',
# 'linkedin_link',
# 'github_link',
# 'stackoverflow_link',
# 'facebook_link',
]
fieldsets=(
('Basic Info', {'fields' : [
'user_image',
'user_name',
'user_profession',
],
},
),
(
'Contact Info', {
'fields': [
'user_phone',
'user_email',
'user_address',
],
},
),
(
'Social Links', {
'fields': [
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
],
},
),
(
'Core Info', {
'fields' :[
'user_info',
'user_experience',
'user_edu',
],
},
),
)
formfield_overrides = {
models.TextField: {'widget': TinyMCE}
}
admin.site.register(UserInfo, UserInfoAdmin)
|
normal
|
{
"blob_id": "15134d7e4036c102bc9d2ba4d321fadd0467100f",
"index": 6637,
"step-1": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\nadmin.site.register(UserInfo, UserInfoAdmin)\n",
"step-4": "from django.contrib import admin\nfrom django.db import models\nfrom tinymce.widgets import TinyMCE\nfrom .models import UserInfo\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\nadmin.site.register(UserInfo, UserInfoAdmin)\n",
"step-5": "from django.contrib import admin\nfrom django.db import models\nfrom tinymce.widgets import TinyMCE\n\nfrom .models import UserInfo\n\n# Register your models here.\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display=[\n 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n search_fields=[\n 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n list_display_links=[\n 'user_name', \n # 'user_profession', \n # 'user_phone', \n # 'user_email', \n # 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n list_editable = [\n # 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n # 'facebook_link', \n # 'instagram_link', \n # 'telegram_link', \n # 'whatsup_link', \n # 'linkedin_link', \n # 'github_link', \n # 'stackoverflow_link', \n # 'facebook_link', \n ]\n\n fieldsets=(\n ('Basic Info', {'fields' : [\n 'user_image', \n 'user_name', \n 'user_profession', \n ],\n },\n ),\n (\n 'Contact Info', {\n 'fields': [\n 'user_phone', \n 'user_email', \n 'user_address', \n ],\n },\n ),\n (\n 'Social Links', {\n 'fields': [\n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n ],\n },\n ),\n (\n 'Core Info', {\n 'fields' :[\n 'user_info',\n 'user_experience',\n 'user_edu',\n ],\n },\n ),\n )\n formfield_overrides = {\n models.TextField: {'widget': TinyMCE}\n }\nadmin.site.register(UserInfo, UserInfoAdmin)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
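The UserInfo model itself is not shown; a hypothetical models.py matching the field names the admin references might look like the sketch below. Every field type, max_length, and upload path here is an assumption.

from django.db import models

class UserInfo(models.Model):
    user_image = models.ImageField(upload_to='user_images/', blank=True)
    user_name = models.CharField(max_length=100)
    user_profession = models.CharField(max_length=100)
    user_phone = models.CharField(max_length=30)
    user_email = models.EmailField()
    user_address = models.CharField(max_length=255)
    facebook_link = models.URLField(blank=True)
    instagram_link = models.URLField(blank=True)
    telegram_link = models.URLField(blank=True)
    whatsup_link = models.URLField(blank=True)
    linkedin_link = models.URLField(blank=True)
    github_link = models.URLField(blank=True)
    stackoverflow_link = models.URLField(blank=True)
    user_info = models.TextField(blank=True)        # TextFields get the TinyMCE widget in the admin
    user_experience = models.TextField(blank=True)
    user_edu = models.TextField(blank=True)

    def __str__(self):
        return self.user_name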
from db_upgrader.Repositories.store import Store, StoreException
from db_upgrader.Models.product import *
class ProductStore(Store):
table = 'product'
def add_product(self, product):
try:
c = self.conn.cursor()
c.execute(
'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)'
, (product.name, product.customerId, product.is_enable))
return c.lastrowid
except Exception as e:
raise StoreException('error storing product: {}'.format(e))
|
normal
|
{
"blob_id": "963499e071873083dc942486b9a5b094393cd99e",
"index": 4458,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProductStore(Store):\n <mask token>\n\n def add_product(self, product):\n try:\n c = self.conn.cursor()\n c.execute(\n 'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)'\n , (product.name, product.customerId, product.is_enable))\n return c.lastrowid\n except Exception as e:\n raise StoreException('error storing product: {}'.format(e))\n",
"step-3": "<mask token>\n\n\nclass ProductStore(Store):\n table = 'product'\n\n def add_product(self, product):\n try:\n c = self.conn.cursor()\n c.execute(\n 'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)'\n , (product.name, product.customerId, product.is_enable))\n return c.lastrowid\n except Exception as e:\n raise StoreException('error storing product: {}'.format(e))\n",
"step-4": "from db_upgrader.Repositories.store import Store, StoreException\nfrom db_upgrader.Models.product import *\n\n\nclass ProductStore(Store):\n table = 'product'\n\n def add_product(self, product):\n try:\n c = self.conn.cursor()\n c.execute(\n 'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)'\n , (product.name, product.customerId, product.is_enable))\n return c.lastrowid\n except Exception as e:\n raise StoreException('error storing product: {}'.format(e))\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
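A hypothetical usage sketch; the Store base class and the Product model are not shown, so the import path for ProductStore, the Product constructor arguments, and the commit responsibility are all assumptions.

from db_upgrader.Repositories.store import StoreException
from db_upgrader.Repositories.product_store import ProductStore
from db_upgrader.Models.product import Product

store = ProductStore()                           # assumed to open self.conn on construction
product = Product(name='Widget', customerId=42, is_enable=1)
try:
    new_id = store.add_product(product)          # returns cursor.lastrowid on success
    store.conn.commit()                          # committing is assumed to be the caller's job
    print('inserted product id', new_id)
except StoreException as exc:
    print(exc)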
import numpy as np
import itertools
from scipy.linalg import eig, schur
from eigen_rootfinding.polynomial import MultiCheb, MultiPower
from eigen_rootfinding.utils import memoize
from scipy.stats import ortho_group
def indexarray(matrix_terms, which, var):
"""Compute the array mapping monomials under multiplication by x_var
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the exponent for each variable in the ith multivariate
monomial
which : slice object
object to index into the matrix_terms for the monomials we want to multiply by var
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr : 1d integer ndarray
Array containing the indices of the lower-degree monomials after multiplication
by x_var
"""
mults = matrix_terms[which].copy()
mults[:, var] += 1
return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)
def indexarray_cheb(matrix_terms, which, var):
"""Compute the array mapping Chebyshev monomials under multiplication by x_var:
T_1*T_0 = T_1
T_1*T_n = .5(T_(n+1)+ T_(n-1))
Parameters
----------
matrix_terms : 2d integer ndarray
Array containing the monomials in order. matrix_terms[i] is the array
containing the degree for each univariate Chebyshev monomial in the ith
multivariate monomial
    which : slice object
        object to index into the matrix_terms for the monomials we want to multiply by var
var : int
Variable to multiply by: x_0, ..., x_(dim-1)
Returns
-------
arr1 : 1d integer ndarray
Array containing the indices of T_(n+1)
arr2 : 1d
Array containing the indices of T_(n-1)
"""
up = matrix_terms[which].copy()
up[:, var] += 1
down = matrix_terms[which].copy()
down[:, var] -= 1
down[down[:, var]==-1, var] += 2
arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)
arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)
return arr1, arr2
def ms_matrices(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the monomial basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the monomial basis
matrix_terms : 2d ndarray
Array with ordered monomial basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim),dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr = indexarray(matrix_terms, slice(m,None), i)
M[..., i] = Q.conj().T@A[arr]
return M
def ms_matrices_cheb(E, Q, matrix_terms, dim):
"""Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
Q : (l, n) 2d ndarray
Matrix whose columns give the quotient basis in terms of the Chebyshev basis
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
dim : int
Number of variables
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
n = Q.shape[1]
m = E.shape[0]
M = np.empty((n, n, dim),dtype=E.dtype)
A = np.vstack((-E, Q))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(m,None), i)
M[..., i] = .5*Q.T.conj()@(A[arr1]+A[arr2])
return M
def ms_matrices_p(E, P, matrix_terms, dim, cut):
"""Compute the Möller-Stetter matrices in the power basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
P : (, l) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
        Array with ordered monomial basis
    dim : int
        Number of variables
    cut : int
        Row index at which the pivoting permutation P is applied to matrix_terms
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim),dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr = indexarray(matrix_terms, slice(r,None), i)
M[..., i] = A[arr]
return M
def ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):
""" Compute the Möller-Stetter matrices in the Chebyshev basis from a
reduced Macaulay matrix (QRP method)
Parameters
----------
E : (m, k) ndarray
Columns of the reduced Macaulay matrix corresponding to the quotient basis
P : (, l) ndarray
Array of pivots returned in QR with pivoting, used to permute the columns.
matrix_terms : 2d ndarray
Array with ordered Chebyshev basis
    dim : int
        Number of variables
    cut : int
        Row index at which the pivoting permutation P is applied to matrix_terms
Returns
-------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
"""
r, n = E.shape
matrix_terms[cut:] = matrix_terms[cut:][P]
M = np.empty((n, n, dim),dtype=E.dtype)
A = np.vstack((-E, np.eye(n)))
for i in range(dim):
arr1, arr2 = indexarray_cheb(matrix_terms, slice(r,None), i)
M[..., i] = .5*(A[arr1] + A[arr2])
return M
def sort_eigs(eigs, diag):
"""Sorts the eigs array to match the order on the diagonal
of the Schur factorization
Parameters
----------
eigs : 1d ndarray
Array of unsorted eigenvalues
diag : 1d complex ndarray
Array containing the diagonal of the approximate Schur factorization
Returns
-------
w : 1d ndarray
Eigenvalues from eigs sorted to match the order in diag
"""
n = diag.shape[0]
lst = list(range(n))
arr = []
for eig in eigs:
i = lst[np.argmin(np.abs(diag[lst]-eig))]
arr.append(i)
lst.remove(i)
return np.argsort(arr)
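# Worked example (illustration only, not part of the original module):
# sort_eigs(np.array([3., 1., 2.]), np.array([1., 2., 3.])) returns [1, 2, 0],
# and eigs[[1, 2, 0]] == [1., 2., 3.] lines up with the order of diag.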
@memoize
def get_rand_combos_matrix(rows, cols, normal=False):
""" Generates a rows by cols random matrix with orthogonal rows or columns,
depending on if rows > cols or cols > rows.
Parameters
----------
rows : int
Number of rows
cols : int
Number of columns
normal : bool
Optional. Whether or not to create a matrix using entries drawn
from the standard normal distribution (N(0, 1)) or not. If it's
False, it will return an orthogonal matrix.
Returns
-------
C : (rows, cols) ndarray
Matrix with orthgonal rows or columns, depending on if rows > cols or
cols > rows if normal is False, otherwise a matrix with
coefficients drawn from the standard normal (N(0, 1)).
"""
np.random.seed(57)
# TODO perhaps explore different types of random matrices?
# randn was giving me conditioning problems
if normal:
C = np.random.normal(loc=0, scale=1, size=(rows, cols))
return C
size = max(rows, cols)
C = ortho_group.rvs(size)
return C[:rows, :cols]
@memoize
def get_Q_c(dim):
""" Generates a once-chosen random orthogonal matrix and a random linear combination
for use in the simultaneous eigenvalue compution.
Parameters
----------
dim : int
Dimension of the system
Returns
-------
Q : (dim, dim) ndarray
Random orthogonal rotation
c : (dim, ) ndarray
Random linear combination
"""
np.random.seed(103)
Q = ortho_group.rvs(dim)
c = np.random.randn(dim)
return Q, c
def msroots(M):
"""Computes the roots to a system via the eigenvalues of the Möller-Stetter
matrices. Implicitly performs a random rotation of the coordinate system
to avoid repeated eigenvalues arising from special structure in the underlying
polynomial system. Approximates the joint eigenvalue problem using a Schur
factorization of a linear combination of the matrices.
Parameters
----------
M : (n, n, dim) ndarray
Array containing the nxn Möller-Stetter matrices, where the matrix
corresponding to multiplication by x_i is M[..., i]
Returns
-------
roots : (n, dim) ndarray
Array containing the approximate roots of the system, where each row
is a root.
"""
dim = M.shape[-1]
# perform a random rotation with a random orthogonal Q
Q, c = get_Q_c(dim)
M = (Q@M[..., np.newaxis])[..., 0]
eigs = np.empty((dim, M.shape[0]), dtype='complex')
# Compute the matrix U that triangularizes a random linear combination
U = schur((M*c).sum(axis=-1), output='complex')[1]
for i in range(0, dim):
T = (U.T.conj())@(M[..., i])@U
w = eig(M[..., i], right=False)
arr = sort_eigs(w, np.diag(T))
eigs[i] = w[arr]
# Rotate back before returning, transposing to match expected shape
return (Q.T@eigs).T
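# Illustration only (not part of the original module): indexarray on a tiny
# 2-variable monomial basis. Multiplying the degree-1 and degree-0 monomials
# by x_0 maps them onto the rows holding x_0^2, x_0*x_1 and x_0.
if __name__ == "__main__":
    demo_terms = np.array([[2, 0], [1, 1], [0, 2],    # degree-2 monomials
                           [1, 0], [0, 1],            # degree-1 monomials
                           [0, 0]])                   # constant term
    print(indexarray(demo_terms, slice(3, None), 0))  # -> [0 1 3]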
|
normal
|
{
"blob_id": "14fb6776ac30802edf43c43acbee64263c6bdd7b",
"index": 2777,
"step-1": "<mask token>\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\n<mask token>\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. 
Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-2": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\n<mask token>\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\n<mask token>\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, 
None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-3": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var] == -1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).\n sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis])\n .sum(axis=-1), axis=1)\n return arr1, arr2\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = 
np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r, None), i)\n M[..., i] = A[arr]\n return M\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. 
Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-4": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var] == -1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).\n sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis])\n .sum(axis=-1), axis=1)\n return arr1, arr2\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = 
np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r, None), i)\n M[..., i] = A[arr]\n return M\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n@memoize\ndef get_rand_combos_matrix(rows, cols, normal=False):\n \"\"\" Generates a rows by cols random matrix with orthogonal rows or columns,\n depending on if rows > cols or cols > rows.\n\n Parameters\n ----------\n rows : int\n Number of rows\n cols : int\n Number of columns\n normal : bool\n Optional. Whether or not to create a matrix using entries drawn\n from the standard normal distribution (N(0, 1)) or not. 
If it's\n False, it will return an orthogonal matrix.\n\n Returns\n -------\n C : (rows, cols) ndarray\n Matrix with orthgonal rows or columns, depending on if rows > cols or\n cols > rows if normal is False, otherwise a matrix with\n coefficients drawn from the standard normal (N(0, 1)).\n \"\"\"\n np.random.seed(57)\n if normal:\n C = np.random.normal(loc=0, scale=1, size=(rows, cols))\n return C\n size = max(rows, cols)\n C = ortho_group.rvs(size)\n return C[:rows, :cols]\n\n\n@memoize\ndef get_Q_c(dim):\n \"\"\" Generates a once-chosen random orthogonal matrix and a random linear combination\n for use in the simultaneous eigenvalue compution.\n\n Parameters\n ----------\n dim : int\n Dimension of the system\n\n Returns\n -------\n Q : (dim, dim) ndarray\n Random orthogonal rotation\n c : (dim, ) ndarray\n Random linear combination\n \"\"\"\n np.random.seed(103)\n Q = ortho_group.rvs(dim)\n c = np.random.randn(dim)\n return Q, c\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-5": "import numpy as np\nimport itertools\nfrom scipy.linalg import eig, schur\nfrom eigen_rootfinding.polynomial import MultiCheb, MultiPower\nfrom eigen_rootfinding.utils import memoize\nfrom scipy.stats import ortho_group\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var]==-1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n return arr1, arr2\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m,None), i)\n M[..., i] = Q.conj().T@A[arr]\n return M\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn 
Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m,None), i)\n M[..., i] = .5*Q.T.conj()@(A[arr1]+A[arr2])\n return M\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r,None), i)\n M[..., i] = A[arr]\n return M\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r,None), i)\n M[..., i] = .5*(A[arr1] + A[arr2])\n return M\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst]-eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n@memoize\ndef get_rand_combos_matrix(rows, cols, normal=False):\n \"\"\" Generates a rows by cols random matrix with orthogonal rows or columns,\n depending on if rows > cols or cols > rows.\n\n Parameters\n ----------\n rows : int\n Number of rows\n cols : int\n Number of columns\n normal : bool\n Optional. Whether or not to create a matrix using entries drawn\n from the standard normal distribution (N(0, 1)) or not. 
If it's\n False, it will return an orthogonal matrix.\n\n Returns\n -------\n C : (rows, cols) ndarray\n Matrix with orthgonal rows or columns, depending on if rows > cols or\n cols > rows if normal is False, otherwise a matrix with\n coefficients drawn from the standard normal (N(0, 1)).\n \"\"\"\n np.random.seed(57)\n # TODO perhaps explore different types of random matrices?\n # randn was giving me conditioning problems\n if normal:\n C = np.random.normal(loc=0, scale=1, size=(rows, cols))\n return C\n size = max(rows, cols)\n C = ortho_group.rvs(size)\n return C[:rows, :cols]\n\n@memoize\ndef get_Q_c(dim):\n \"\"\" Generates a once-chosen random orthogonal matrix and a random linear combination\n for use in the simultaneous eigenvalue compution.\n\n Parameters\n ----------\n dim : int\n Dimension of the system\n\n Returns\n -------\n Q : (dim, dim) ndarray\n Random orthogonal rotation\n c : (dim, ) ndarray\n Random linear combination\n \"\"\"\n np.random.seed(103)\n Q = ortho_group.rvs(dim)\n c = np.random.randn(dim)\n return Q, c\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n\n # perform a random rotation with a random orthogonal Q\n Q, c = get_Q_c(dim)\n M = (Q@M[..., np.newaxis])[..., 0]\n\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n # Compute the matrix U that triangularizes a random linear combination\n U = schur((M*c).sum(axis=-1), output='complex')[1]\n\n for i in range(0, dim):\n T = (U.T.conj())@(M[..., i])@U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n\n # Rotate back before returning, transposing to match expected shape\n return (Q.T@eigs).T\n",
"step-ids": [
5,
6,
8,
10,
12
]
}
|
[
5,
6,
8,
10,
12
] |
import unittest
from utils import getParams
from utils.httpUtil import HttpUtil
from utils.logger import Log
logger = Log(logger='cms_getMarket').get_log()
class NavTest(unittest.TestCase):
@classmethod
def setUpClass(cls) ->None:
cls.url = getParams.get_url('cms_getMarket', 'getMarket')
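        # fetch an auth token once for the whole test class before any request is made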
HttpUtil.get_token()
@classmethod
def tearDownClass(cls) ->None:
pass
def test01_getMarket(self):
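        # expected 'code' and 'msg' values are loaded via getParams and compared with the live response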
        resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code')
resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')
response = HttpUtil().do_get(self.url)
self.assertEqual(resp_c, response['code'])
self.assertEqual(resp_m, response['msg'])
|
normal
|
{
"blob_id": "b328ee0b6c5afaf496297cefe477f933af458a03",
"index": 5654,
"step-1": "<mask token>\n\n\nclass NavTest(unittest.TestCase):\n <mask token>\n\n @classmethod\n def tearDownClass(cls) ->None:\n pass\n\n def test01_getMarket(self):\n resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'\n )\n resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')\n response = HttpUtil().do_get(self.url)\n self.assertEqual(resp_c, response['code'])\n self.assertEqual(resp_m, response['msg'])\n",
"step-2": "<mask token>\n\n\nclass NavTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.url = getParams.get_url('cms_getMarket', 'getMarket')\n HttpUtil.get_token()\n\n @classmethod\n def tearDownClass(cls) ->None:\n pass\n\n def test01_getMarket(self):\n resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'\n )\n resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')\n response = HttpUtil().do_get(self.url)\n self.assertEqual(resp_c, response['code'])\n self.assertEqual(resp_m, response['msg'])\n",
"step-3": "<mask token>\nlogger = Log(logger='cms_getMarket').get_log()\n\n\nclass NavTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.url = getParams.get_url('cms_getMarket', 'getMarket')\n HttpUtil.get_token()\n\n @classmethod\n def tearDownClass(cls) ->None:\n pass\n\n def test01_getMarket(self):\n resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'\n )\n resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')\n response = HttpUtil().do_get(self.url)\n self.assertEqual(resp_c, response['code'])\n self.assertEqual(resp_m, response['msg'])\n",
"step-4": "import unittest\nfrom utils import getParams\nfrom utils.httpUtil import HttpUtil\nfrom utils.logger import Log\nlogger = Log(logger='cms_getMarket').get_log()\n\n\nclass NavTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) ->None:\n cls.url = getParams.get_url('cms_getMarket', 'getMarket')\n HttpUtil.get_token()\n\n @classmethod\n def tearDownClass(cls) ->None:\n pass\n\n def test01_getMarket(self):\n resp_c = getParams.get_resp_params('cms_getMarket', 'getMarket', 'code'\n )\n resp_m = getParams.get_resp_params('cms_getMarket', 'getMarket', 'msg')\n response = HttpUtil().do_get(self.url)\n self.assertEqual(resp_c, response['code'])\n self.assertEqual(resp_m, response['msg'])\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
from collections import defaultdict
from mask import Mask
from utils import bits_to_decimal
def get_program(filename):
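    # parse the puzzle input into a list of Mask objects, each carrying its mem writes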
program = []
mask = None
with open(filename, 'r') as f:
for line in f:
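            # strip the trailing newline before parsing the line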
line = line[:-1]
if 'mask' in line:
if mask is not None:
program.append(mask)
mask = Mask(line)
elif 'mem' in line:
mask.add_mem(line)
program.append(mask)
return program
def run_program_v1(program):
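    # part one: the mask is applied to each value (apply_v1) before it is stored at its address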
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
address = mem[0]
bits = mem[1]
masked = mask.apply_v1(bits)
addresses[address] = masked
return addresses
def run_program_v2(program):
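    # part two: apply_v2 expands each write into the set of decoded addresses it touches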
addresses = defaultdict(int)
for mask in program:
for mem in mask.mems:
subaddresses = mask.apply_v2(mem)
addresses.update(subaddresses)
return addresses
if __name__ == "__main__":
program = get_program('input.txt')
addresses_v1 = run_program_v1(program)
part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])
print(f'Part One Answer: {part_one}')
addresses_v2 = run_program_v2(program)
part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])
print(f'Part Two Answer: {part_two}')
|
normal
|
{
"blob_id": "56e8cdec854b3b7a2f925e70d7d59a73b76f9952",
"index": 9340,
"step-1": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == '__main__':\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-4": "from collections import defaultdict\nfrom mask import Mask\nfrom utils import bits_to_decimal\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == '__main__':\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-5": "from collections import defaultdict\n\nfrom mask import Mask\nfrom utils import bits_to_decimal\n\n\ndef get_program(filename):\n program = []\n mask = None\n with open(filename, 'r') as f:\n for line in f:\n line = line[:-1]\n if 'mask' in line:\n if mask is not None:\n program.append(mask)\n mask = Mask(line)\n elif 'mem' in line:\n mask.add_mem(line)\n program.append(mask)\n return program\n\n\ndef run_program_v1(program):\n addresses = defaultdict(int)\n\n for mask in program:\n for mem in mask.mems:\n address = mem[0]\n bits = mem[1]\n masked = mask.apply_v1(bits)\n addresses[address] = masked\n return addresses\n\n\ndef run_program_v2(program):\n addresses = defaultdict(int)\n for mask in program:\n for mem in mask.mems:\n subaddresses = mask.apply_v2(mem)\n addresses.update(subaddresses)\n return addresses\n\n\nif __name__ == \"__main__\":\n program = get_program('input.txt')\n addresses_v1 = run_program_v1(program)\n part_one = sum([bits_to_decimal(bits) for bits in addresses_v1.values()])\n print(f'Part One Answer: {part_one}')\n addresses_v2 = run_program_v2(program)\n part_two = sum([bits_to_decimal(bits) for bits in addresses_v2.values()])\n print(f'Part Two Answer: {part_two}')\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
a, b = input().split()
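# helper that raises AssertionError with a readable message when the two values differ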
def test_input_text(expected_result, actual_result):
assert expected_result == actual_result, \
f'expected {expected_result}, got {actual_result}'
test_input_text(a, b)
|
normal
|
{
"blob_id": "63391b31d1746f9b3583df5353ae160a430943a9",
"index": 9027,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_input_text(expected_result, actual_result):\n assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_input_text(expected_result, actual_result):\n assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'\n\n\ntest_input_text(a, b)\n",
"step-4": "a, b = input().split()\n\n\ndef test_input_text(expected_result, actual_result):\n assert expected_result == actual_result, f'expected {expected_result}, got {actual_result}'\n\n\ntest_input_text(a, b)\n",
"step-5": "a, b = input().split()\n\ndef test_input_text(expected_result, actual_result):\n assert expected_result == actual_result, \\\n f'expected {expected_result}, got {actual_result}'\n\ntest_input_text(a,b)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
import sys
#from Constants import *
# start
import CrudMatrixDao
class CrudAccessValue:
def __init__(self):
self.crudAccessValue = {}
self.__run()
def __run(self):
aCrudMatrixDao = CrudMatrixDao.CrudMatrixDao()
# print aCrudMatrixDao.selectCrudAccessValueAction()
for row in aCrudMatrixDao.selectCrudAccessValueAction():
crudGubun = row[0]; accessValue= row[1]
self.crudAccessValue[crudGubun] = accessValue
def getAccessValue(self, crudGubun):
try:
out = self.crudAccessValue[crudGubun]
except KeyError:
out = crudGubun
return out
if __name__ == "__main__":
aCrudAccessValue = CrudAccessValue()
print aCrudAccessValue.getAccessValue('C')
|
normal
|
{
"blob_id": "38e616e35f165d458d774dd0b6837a733b8402d7",
"index": 1555,
"step-1": "# -*- coding: utf-8 -*-\r\nimport sys\r\n#from Constants import *\r\n# start\r\nimport CrudMatrixDao\r\n\r\nclass CrudAccessValue:\r\n\tdef __init__(self):\r\n\t\tself.crudAccessValue = {}\r\n\t\tself.__run()\r\n\t\t\r\n\tdef __run(self):\r\n\t\taCrudMatrixDao = CrudMatrixDao.CrudMatrixDao()\r\n\t\t# print aCrudMatrixDao.selectCrudAccessValueAction()\r\n\r\n\t\tfor row in aCrudMatrixDao.selectCrudAccessValueAction():\r\n\t\t\tcrudGubun = row[0]; accessValue= row[1]\r\n\t\t\tself.crudAccessValue[crudGubun] = accessValue\r\n\t\t\t\r\n\tdef getAccessValue(self, crudGubun):\r\n\t\t\r\n\t\ttry:\r\n\t\t\tout = self.crudAccessValue[crudGubun]\r\n\t\texcept KeyError:\r\n\t\t\tout = crudGubun\r\n\t\t\t\r\n\t\treturn out\r\n\t\r\nif __name__ == \"__main__\":\r\n\taCrudAccessValue = CrudAccessValue()\r\n\tprint aCrudAccessValue.getAccessValue('C')\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""Add uri on identity provider
Revision ID: 52561c782d96
Revises: cdf9f34b764c
Create Date: 2022-03-11 10:16:39.583434
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '52561c782d96'
down_revision = 'cdf9f34b764c'
branch_labels = None
depends_on = None
def upgrade():
bind = op.get_bind()
# get api urls
urls = bind.execute("SELECT p.id as pid, r.id as rid, r.uri as uri "
"FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id")
# add URI
op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(), nullable=True))
# set api_url as default URI
for url in urls:
bind.execute(f"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}")
# patch Github URI
bind.execute("UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'")
# add constraints
op.alter_column('oauth2_identity_provider', 'uri', nullable=False)
op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])
def downgrade():
# remove URI
op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')
op.drop_column('oauth2_identity_provider', 'uri')
|
normal
|
{
"blob_id": "c185a88332e39c561649f087f01fd3b704e7010b",
"index": 1959,
"step-1": "<mask token>\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-3": "<mask token>\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-5": "\"\"\"Add uri on identity provider\n\nRevision ID: 52561c782d96\nRevises: cdf9f34b764c\nCreate Date: 2022-03-11 10:16:39.583434\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n # get api urls\n urls = bind.execute(\"SELECT p.id as pid, r.id as rid, r.uri as uri \"\n \"FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id\")\n # add URI\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(), nullable=True))\n # set api_url as default URI\n for url in urls:\n bind.execute(f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\")\n # patch Github URI\n bind.execute(\"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\")\n # add constraints\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n # remove URI\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |