code (string, length 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, length 1 to 5)
---|---|---|---|
class cal4:
    def setdata(self, n1):
        self.n1 = n1
    def display(self):
        # Use the stored attribute; `return n1*n1` read the global by accident.
        return self.n1 * self.n1
n1 = int(input("Enter number: "))
c = cal4()
c.setdata(n1)  # was missing, so self.n1 was never set
print(c.display())
|
normal
|
{
"blob_id": "65b90fccd0ee74b369475aa9fe33f159881c8b82",
"index": 6645,
"step-1": "class cal4:\n\n def setdata(self, n1):\n self.n1 = n1\n <mask token>\n\n\n<mask token>\n",
"step-2": "class cal4:\n\n def setdata(self, n1):\n self.n1 = n1\n\n def display(self):\n return n1 * n1\n\n\n<mask token>\n",
"step-3": "class cal4:\n\n def setdata(self, n1):\n self.n1 = n1\n\n def display(self):\n return n1 * n1\n\n\n<mask token>\nprint(c.display())\n",
"step-4": "class cal4:\n\n def setdata(self, n1):\n self.n1 = n1\n\n def display(self):\n return n1 * n1\n\n\nn1 = int(input('Enter number: '))\nc = cal4()\nprint(c.display())\n",
"step-5": "class cal4:\r\n def setdata(self,n1):\r\n self.n1 = n1\r\n def display(self):\r\n return n1*n1\r\nn1 = int(input(\"Enter number: \"))\r\nc = cal4()\r\n\r\nprint(c.display())",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
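Each row above pairs a rendered code cell with an original_example dict whose step-1 through step-5 entries progressively unmask the same program. A minimal sketch of how that structure can be inspected, assuming a row's original_example is available as a plain Python dict like the one shown (the mask_counts helper is illustrative, not part of the dataset's tooling):

MASK = "<mask token>"

def mask_counts(example):
    """Count masked spans in each non-null step of a record."""
    counts = []
    for i in range(1, 6):
        step = example.get(f"step-{i}")
        if step is not None:
            counts.append((f"step-{i}", step.count(MASK)))
    return counts

# For the cal4 record above this yields 2 masks in step-1, 1 in step-2 and
# step-3, and 0 in step-4 and step-5: masks only shrink as code is revealed.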
<|reserved_special_token_0|>
class MLPNet(nn.Module):
<|reserved_special_token_0|>
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = torch.sigmoid(x)
x = self.fc2(x)
return x
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MLPNet(nn.Module):
def __init__(self, num_classes):
super(MLPNet, self).__init__()
self.fc1 = nn.Linear(32 * 32 * 3, 512)
self.fc2 = nn.Linear(512, num_classes)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = torch.sigmoid(x)
x = self.fc2(x)
return x
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MLPNet(nn.Module):
def __init__(self, num_classes):
super(MLPNet, self).__init__()
self.fc1 = nn.Linear(32 * 32 * 3, 512)
self.fc2 = nn.Linear(512, num_classes)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = torch.sigmoid(x)
x = self.fc2(x)
return x
def zero_weights(self):
self.fc1.weight.data.fill_(0.0)
self.fc1.bias.data.fill_(0.0)
self.fc2.weight.data.fill_(0.0)
self.fc2.bias.data.fill_(0.0)
<|reserved_special_token_1|>
import torch
import torch.nn as nn
class MLPNet(nn.Module):
def __init__(self, num_classes):
super(MLPNet, self).__init__()
self.fc1 = nn.Linear(32 * 32 * 3, 512)
self.fc2 = nn.Linear(512, num_classes)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = torch.sigmoid(x)
x = self.fc2(x)
return x
def zero_weights(self):
self.fc1.weight.data.fill_(0.0)
self.fc1.bias.data.fill_(0.0)
self.fc2.weight.data.fill_(0.0)
self.fc2.bias.data.fill_(0.0)
|
flexible
|
{
"blob_id": "eff8b6a282ac73a116587e7ed04f386927c9f826",
"index": 9089,
"step-1": "<mask token>\n\n\nclass MLPNet(nn.Module):\n <mask token>\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n x = self.fc2(x)\n return x\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MLPNet(nn.Module):\n\n def __init__(self, num_classes):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(32 * 32 * 3, 512)\n self.fc2 = nn.Linear(512, num_classes)\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n x = self.fc2(x)\n return x\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MLPNet(nn.Module):\n\n def __init__(self, num_classes):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(32 * 32 * 3, 512)\n self.fc2 = nn.Linear(512, num_classes)\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n x = self.fc2(x)\n return x\n\n def zero_weights(self):\n self.fc1.weight.data.fill_(0.0)\n self.fc1.bias.data.fill_(0.0)\n self.fc2.weight.data.fill_(0.0)\n self.fc2.bias.data.fill_(0.0)\n",
"step-4": "import torch\nimport torch.nn as nn\n\n\nclass MLPNet(nn.Module):\n\n def __init__(self, num_classes):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(32 * 32 * 3, 512)\n self.fc2 = nn.Linear(512, num_classes)\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n x = self.fc2(x)\n return x\n\n def zero_weights(self):\n self.fc1.weight.data.fill_(0.0)\n self.fc1.bias.data.fill_(0.0)\n self.fc2.weight.data.fill_(0.0)\n self.fc2.bias.data.fill_(0.0)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
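The step-4 entry above is already a complete module. A short smoke test for it, as a sketch: the 32 * 32 * 3 input layer suggests CIFAR-10-shaped 3x32x32 images, and num_classes=10 below is likewise an assumption.

import torch
import torch.nn as nn

# MLPNet exactly as defined in step-4 of the record above.
class MLPNet(nn.Module):
    def __init__(self, num_classes):
        super(MLPNet, self).__init__()
        self.fc1 = nn.Linear(32 * 32 * 3, 512)
        self.fc2 = nn.Linear(512, num_classes)

    def forward(self, x):
        x = x.view(x.size(0), -1)   # flatten each image to 3072 features
        x = self.fc1(x)
        x = torch.sigmoid(x)
        x = self.fc2(x)
        return x

    def zero_weights(self):
        self.fc1.weight.data.fill_(0.0)
        self.fc1.bias.data.fill_(0.0)
        self.fc2.weight.data.fill_(0.0)
        self.fc2.bias.data.fill_(0.0)

model = MLPNet(num_classes=10)              # 10 classes: assumed, CIFAR-10-like
x = torch.randn(4, 3, 32, 32)               # batch of 4 images
assert model(x).shape == (4, 10)

model.zero_weights()                        # zero all weights and biases, so
assert model(x).abs().sum().item() == 0.0   # sigmoid(0) * 0 + 0 gives all-zero logits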
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import paramiko
import commands
def ip_check():
"""
Parses attributes for given hosts,
then checks if hosts are up
and then calls path_check function with working hosts.
"""
hosts = []
valid_hosts = []
for item in sys.argv:
if '@' in item:
hosts.append(item)
for i in hosts:
host = i.split('@')[1].split(':')[0]
command = os.system('ping -c 1 '+host+' > /dev/null')
if command == 0:
valid_hosts.append(i)
if valid_hosts:
path_check(valid_hosts)
def path_check(hosts):
"""
Parses username, port, host and local and remote path,
finds all local and remote files, using find_local_files and find_remote_files functions,
and then opens ssh session using paramiko for each given host.
"""
local_files = []
local_path = ''
for item in sys.argv:
if '–pass' in item:
secret = item.split('=')[1].strip("'")
break
else:
secret = ''
for item in sys.argv:
if '/' in item and '@' not in item:
local_path = item
if '.' in item and '/' not in item:
local_files.append(item)
if local_path:
local_files.append(find_local_files(local_path, 'f'))
for i in hosts:
user_port, host_remote_path = i.split('@')
if ':' in i:
host, remote_path = host_remote_path.split(':')
else:
host = host_remote_path
remote_path = ''
for separator in ',.:':
if separator in user_port:
user, port = user_port.split(separator)
break
else:
user = user_port
port = 0
ssh = open_sshclient(host, user, port, secret)
if not remote_path:
remote_path = local_path
ssh.exec_command('mkdir -p '+remote_path)
remote_files = find_remote_files(remote_path, 'f', ssh)
ssh.close()
copy_file(hosts)
def open_sshclient(host, user, port, secret):
"""
Opens ssh session using paramiko.
"""
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.load_system_host_keys()
if secret and port:
ssh_client.connect(hostname=host, username=user, password=secret, port=port)
elif secret and port==0:
ssh_client.connect(hostname=host, username=user, password=secret)
elif not secret and port:
ssh_client.connect(hostname=host, username=user, port=port)
else:
ssh_client.connect(hostname=host, username=user)
return ssh_client
def copy_file(hosts):
"""
Makes all needed operations according to given attributes with rsync.
"""
arguments = []
for item in sys.argv[1:]:
if '@' not in item and '–pass' not in item:
arguments.append(item)
for item in hosts:
# plz use .format for test strings concatenation
os.system('rsync '+' '.join(arguments)+' '+item)
def find_remote_files(remote_path, type, ssh):
"""
Finds all files or directories on remote machine, according to given attributes.
"""
(ssh_in, ssh_out, ssh_err) = ssh.exec_command("find %s -name \"*\" -type %s" % (remote_path, type))
files = []
for file in ssh_out.readlines():
files.append(file.rstrip())
return files
def find_local_files(local_path, type):
"""
Finds all files or directories on local machine, according to given attributes.
"""
local_out = commands.getoutput("find %s -name \"*\" -type %s" % (local_path, type))
files = []
for file in local_out.split("\n"):
files.append(file)
return files
ip_check()
|
normal
|
{
"blob_id": "6e3aa677985d7bd91bfbbd2078665206839bac63",
"index": 3578,
"step-1": "<mask token>\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p ' + remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret,\n port=port)\n elif secret and port == 0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n os.system('rsync ' + ' '.join(arguments) + ' ' + item)\n\n\n<mask token>\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput('find %s -name \"*\" -type %s' % (\n local_path, type))\n files = []\n for file in local_out.split('\\n'):\n files.append(file)\n return files\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ip_check():\n \"\"\"\n Parses attributes for given hosts,\n then checks if hosts are up\n and then calls path_check function with working hosts.\n \"\"\"\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 ' + host + ' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p ' + remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret,\n port=port)\n elif secret and port == 0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n os.system('rsync ' + ' '.join(arguments) + ' ' + item)\n\n\ndef find_remote_files(remote_path, type, ssh):\n \"\"\"\n Finds all files or directories on remote machine, according to given attributes.\n \"\"\"\n ssh_in, ssh_out, ssh_err = ssh.exec_command(\n 'find %s -name \"*\" -type %s' % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput('find %s -name \"*\" -type %s' % (\n local_path, type))\n files = []\n for file in local_out.split('\\n'):\n files.append(file)\n return files\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef ip_check():\n \"\"\"\n Parses attributes for given hosts,\n then checks if hosts are up\n and then calls path_check function with working hosts.\n \"\"\"\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 ' + host + ' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p ' + remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret,\n port=port)\n elif secret and port == 0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n os.system('rsync ' + ' '.join(arguments) + ' ' + item)\n\n\ndef find_remote_files(remote_path, type, ssh):\n \"\"\"\n Finds all files or directories on remote machine, according to given attributes.\n \"\"\"\n ssh_in, ssh_out, ssh_err = ssh.exec_command(\n 'find %s -name \"*\" -type %s' % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput('find %s -name \"*\" -type %s' % (\n local_path, type))\n files = []\n for file in local_out.split('\\n'):\n files.append(file)\n return files\n\n\nip_check()\n",
"step-4": "import os\nimport sys\nimport paramiko\nimport commands\n\n\ndef ip_check():\n \"\"\"\n Parses attributes for given hosts,\n then checks if hosts are up\n and then calls path_check function with working hosts.\n \"\"\"\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 ' + host + ' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p ' + remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret,\n port=port)\n elif secret and port == 0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n os.system('rsync ' + ' '.join(arguments) + ' ' + item)\n\n\ndef find_remote_files(remote_path, type, ssh):\n \"\"\"\n Finds all files or directories on remote machine, according to given attributes.\n \"\"\"\n ssh_in, ssh_out, ssh_err = ssh.exec_command(\n 'find %s -name \"*\" -type %s' % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput('find %s -name \"*\" -type %s' % (\n local_path, type))\n files = []\n for file in local_out.split('\\n'):\n files.append(file)\n return files\n\n\nip_check()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport paramiko\nimport commands\n\n\ndef ip_check():\n \"\"\"\n Parses attributes for given hosts,\n then checks if hosts are up\n and then calls path_check function with working hosts.\n \"\"\"\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 '+host+' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p '+remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret, port=port)\n elif secret and port==0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n # plz use .format for test strings concatenation\n os.system('rsync '+' '.join(arguments)+' '+item)\n\n\ndef find_remote_files(remote_path, type, ssh):\n \"\"\"\n Finds all files or directories on remote machine, according to given attributes.\n \"\"\"\n (ssh_in, ssh_out, ssh_err) = ssh.exec_command(\"find %s -name \\\"*\\\" -type %s\" % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput(\"find %s -name \\\"*\\\" -type %s\" % (local_path, type))\n files = []\n for file in local_out.split(\"\\n\"):\n files.append(file)\n return files\n\nip_check()\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
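The script above is Python 2: it imports the commands module (removed in Python 3) and builds shell commands by string concatenation. For reference, a Python 3 sketch of the find_local_files step using subprocess; equivalent behavior is assumed, and passing the arguments as a list also sidesteps the shell-quoting problems of the original.

import subprocess

def find_local_files(local_path, file_type):
    """List files ('f') or directories ('d') under local_path via find(1)."""
    result = subprocess.run(
        ['find', local_path, '-name', '*', '-type', file_type],
        capture_output=True, text=True, check=True)
    return result.stdout.splitlines()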
# -*- coding: utf-8 -*-
"""
This is a simple sample for seuif.py
License: this code is in the public domain
Author: Cheng Maohua
Email: [email protected]
Last modified: 2016.4.20
"""
from seuif97 import *
import matplotlib.pyplot as plt
import numpy as np
p1,t1 = 16, 535
p2,t2 = 3.56,315
h1 = pt2h(p1, t1)
s1 = pt2s(p1, t1)
h2 = pt2h(p2, t2)
s2 = pt2s(p2, t2)
h2s = ps2h(p2, s1)
his = ishd(p1, t1, p2)
ef = ief(p1, t1, p2, t2)
# print('The isentropic efficiency is ',ef)
# 4条线:p1、p2 等压,等熵焓降线、膨胀线
samp = 0.01
smp1 = s1 - samp
hsmp1 = ps2h(p1, smp1)
sap1 = s1 + samp
hsap1 = ps2h(p1, sap1)
smt1 = s1 - samp
hsmt1 = ps2h(p1, smp1)
sat1 = s1 + samp
hsat1 = ts2h(t1, sap1)
point_p1_h = np.zeros(shape=3)
point_p1_h[0] = hsmp1
point_p1_h[1] = h1
point_p1_h[2] = hsap1
point_p1_s = np.zeros(shape=3)
point_p1_s[0] = smp1
point_p1_s[1] = s1
point_p1_s[2] = sap1
# p2
smp2 = s1 - samp # 等熵焓降点延伸
hsmp2 = ps2h(p2, smp2)
sap2 = s2 + samp
hsap2 = ps2h(p2, sap2)
smt2 = s2 - samp
hsmt2 = ps2h(p1, smp1)
sat2 = s2 + samp
hsat2 = ts2h(t2, sap1)
point_p2_h = np.zeros(shape=3)
point_p2_h[0] = hsmp2
point_p2_h[1] = h2
point_p2_h[2] = hsap2
point_p2_s = np.zeros(shape=3)
point_p2_s[0] = smp2
point_p2_s[1] = s2
point_p2_s[2] = sap2
# 等熵焓降
point_is_h = np.zeros(shape=2)
point_is_h[0] = h1
point_is_h[1] = h2s
point_is_s = np.zeros(shape=2)
point_is_s[0] = s1
point_is_s[1] = s1
# HP Expansion Line
point_hp_h = np.zeros(shape=2)
point_hp_h[0] = h1
point_hp_h[1] = h2
point_hp_s = np.zeros(shape=2)
point_hp_s[0] = s1
point_hp_s[1] = s2
plt.plot(point_p1_s, point_p1_h, 'bs-')
plt.plot(point_p2_s, point_p2_h, 'bs-')
plt.plot(point_is_s, point_is_h, 'ys-')
plt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')
_title = 'The isentropic efficiency = ' + \
r'$\frac{h1-h2}{h1-h2s}$' + '=' + '{:.2f}'.format(ef) + '%'
plt.legend(loc="best", bbox_to_anchor=[0.5, 0.5],
ncol=2, shadow=True, title=_title)
# annotate some interesting points
plt.annotate('(P1,T1)',
xy=(s1, h1), xycoords='data',
xytext=(+10, +30), textcoords='offset points', fontsize=12,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
plt.annotate('(P2,T2)',
xy=(s2, h2), xycoords='data',
xytext=(+10, +30), textcoords='offset points', fontsize=12,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
plt.xlabel('s(kJ/(kg.K))')
plt.ylabel('h(kJ/kg)')
plt.show()
|
normal
|
{
"blob_id": "ebe546794131eddea396bd6b82fbb41aeead4661",
"index": 572,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.plot(point_p1_s, point_p1_h, 'bs-')\nplt.plot(point_p2_s, point_p2_h, 'bs-')\nplt.plot(point_is_s, point_is_h, 'ys-')\nplt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')\n<mask token>\nplt.legend(loc='best', bbox_to_anchor=[0.5, 0.5], ncol=2, shadow=True,\n title=_title)\nplt.annotate('(P1,T1)', xy=(s1, h1), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.annotate('(P2,T2)', xy=(s2, h2), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.xlabel('s(kJ/(kg.K))')\nplt.ylabel('h(kJ/kg)')\nplt.show()\n",
"step-3": "<mask token>\np1, t1 = 16, 535\np2, t2 = 3.56, 315\nh1 = pt2h(p1, t1)\ns1 = pt2s(p1, t1)\nh2 = pt2h(p2, t2)\ns2 = pt2s(p2, t2)\nh2s = ps2h(p2, s1)\nhis = ishd(p1, t1, p2)\nef = ief(p1, t1, p2, t2)\nsamp = 0.01\nsmp1 = s1 - samp\nhsmp1 = ps2h(p1, smp1)\nsap1 = s1 + samp\nhsap1 = ps2h(p1, sap1)\nsmt1 = s1 - samp\nhsmt1 = ps2h(p1, smp1)\nsat1 = s1 + samp\nhsat1 = ts2h(t1, sap1)\npoint_p1_h = np.zeros(shape=3)\npoint_p1_h[0] = hsmp1\npoint_p1_h[1] = h1\npoint_p1_h[2] = hsap1\npoint_p1_s = np.zeros(shape=3)\npoint_p1_s[0] = smp1\npoint_p1_s[1] = s1\npoint_p1_s[2] = sap1\nsmp2 = s1 - samp\nhsmp2 = ps2h(p2, smp2)\nsap2 = s2 + samp\nhsap2 = ps2h(p2, sap2)\nsmt2 = s2 - samp\nhsmt2 = ps2h(p1, smp1)\nsat2 = s2 + samp\nhsat2 = ts2h(t2, sap1)\npoint_p2_h = np.zeros(shape=3)\npoint_p2_h[0] = hsmp2\npoint_p2_h[1] = h2\npoint_p2_h[2] = hsap2\npoint_p2_s = np.zeros(shape=3)\npoint_p2_s[0] = smp2\npoint_p2_s[1] = s2\npoint_p2_s[2] = sap2\npoint_is_h = np.zeros(shape=2)\npoint_is_h[0] = h1\npoint_is_h[1] = h2s\npoint_is_s = np.zeros(shape=2)\npoint_is_s[0] = s1\npoint_is_s[1] = s1\npoint_hp_h = np.zeros(shape=2)\npoint_hp_h[0] = h1\npoint_hp_h[1] = h2\npoint_hp_s = np.zeros(shape=2)\npoint_hp_s[0] = s1\npoint_hp_s[1] = s2\nplt.plot(point_p1_s, point_p1_h, 'bs-')\nplt.plot(point_p2_s, point_p2_h, 'bs-')\nplt.plot(point_is_s, point_is_h, 'ys-')\nplt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')\n_title = ('The isentropic efficiency = ' + '$\\\\frac{h1-h2}{h1-h2s}$' + '=' +\n '{:.2f}'.format(ef) + '%')\nplt.legend(loc='best', bbox_to_anchor=[0.5, 0.5], ncol=2, shadow=True,\n title=_title)\nplt.annotate('(P1,T1)', xy=(s1, h1), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.annotate('(P2,T2)', xy=(s2, h2), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.xlabel('s(kJ/(kg.K))')\nplt.ylabel('h(kJ/kg)')\nplt.show()\n",
"step-4": "<mask token>\nfrom seuif97 import *\nimport matplotlib.pyplot as plt\nimport numpy as np\np1, t1 = 16, 535\np2, t2 = 3.56, 315\nh1 = pt2h(p1, t1)\ns1 = pt2s(p1, t1)\nh2 = pt2h(p2, t2)\ns2 = pt2s(p2, t2)\nh2s = ps2h(p2, s1)\nhis = ishd(p1, t1, p2)\nef = ief(p1, t1, p2, t2)\nsamp = 0.01\nsmp1 = s1 - samp\nhsmp1 = ps2h(p1, smp1)\nsap1 = s1 + samp\nhsap1 = ps2h(p1, sap1)\nsmt1 = s1 - samp\nhsmt1 = ps2h(p1, smp1)\nsat1 = s1 + samp\nhsat1 = ts2h(t1, sap1)\npoint_p1_h = np.zeros(shape=3)\npoint_p1_h[0] = hsmp1\npoint_p1_h[1] = h1\npoint_p1_h[2] = hsap1\npoint_p1_s = np.zeros(shape=3)\npoint_p1_s[0] = smp1\npoint_p1_s[1] = s1\npoint_p1_s[2] = sap1\nsmp2 = s1 - samp\nhsmp2 = ps2h(p2, smp2)\nsap2 = s2 + samp\nhsap2 = ps2h(p2, sap2)\nsmt2 = s2 - samp\nhsmt2 = ps2h(p1, smp1)\nsat2 = s2 + samp\nhsat2 = ts2h(t2, sap1)\npoint_p2_h = np.zeros(shape=3)\npoint_p2_h[0] = hsmp2\npoint_p2_h[1] = h2\npoint_p2_h[2] = hsap2\npoint_p2_s = np.zeros(shape=3)\npoint_p2_s[0] = smp2\npoint_p2_s[1] = s2\npoint_p2_s[2] = sap2\npoint_is_h = np.zeros(shape=2)\npoint_is_h[0] = h1\npoint_is_h[1] = h2s\npoint_is_s = np.zeros(shape=2)\npoint_is_s[0] = s1\npoint_is_s[1] = s1\npoint_hp_h = np.zeros(shape=2)\npoint_hp_h[0] = h1\npoint_hp_h[1] = h2\npoint_hp_s = np.zeros(shape=2)\npoint_hp_s[0] = s1\npoint_hp_s[1] = s2\nplt.plot(point_p1_s, point_p1_h, 'bs-')\nplt.plot(point_p2_s, point_p2_h, 'bs-')\nplt.plot(point_is_s, point_is_h, 'ys-')\nplt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')\n_title = ('The isentropic efficiency = ' + '$\\\\frac{h1-h2}{h1-h2s}$' + '=' +\n '{:.2f}'.format(ef) + '%')\nplt.legend(loc='best', bbox_to_anchor=[0.5, 0.5], ncol=2, shadow=True,\n title=_title)\nplt.annotate('(P1,T1)', xy=(s1, h1), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.annotate('(P2,T2)', xy=(s2, h2), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.xlabel('s(kJ/(kg.K))')\nplt.ylabel('h(kJ/kg)')\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nThis is a simple sample for seuif.py\n\nLicense: this code is in the public domain\n\nAuthor: Cheng Maohua\nEmail: [email protected]\n\nLast modified: 2016.4.20\n\n\"\"\"\nfrom seuif97 import *\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\np1,t1 = 16, 535\np2,t2 = 3.56,315\nh1 = pt2h(p1, t1)\ns1 = pt2s(p1, t1)\n\nh2 = pt2h(p2, t2)\ns2 = pt2s(p2, t2)\n\nh2s = ps2h(p2, s1)\nhis = ishd(p1, t1, p2)\n\nef = ief(p1, t1, p2, t2)\n\n# print('The isentropic efficiency is ',ef)\n\n# 4条线:p1、p2 等压,等熵焓降线、膨胀线\n\nsamp = 0.01\n\nsmp1 = s1 - samp\nhsmp1 = ps2h(p1, smp1)\nsap1 = s1 + samp\nhsap1 = ps2h(p1, sap1)\n\nsmt1 = s1 - samp\nhsmt1 = ps2h(p1, smp1)\nsat1 = s1 + samp\nhsat1 = ts2h(t1, sap1)\n\npoint_p1_h = np.zeros(shape=3)\npoint_p1_h[0] = hsmp1\npoint_p1_h[1] = h1\npoint_p1_h[2] = hsap1\npoint_p1_s = np.zeros(shape=3)\npoint_p1_s[0] = smp1\npoint_p1_s[1] = s1\npoint_p1_s[2] = sap1\n\n# p2\nsmp2 = s1 - samp # 等熵焓降点延伸\nhsmp2 = ps2h(p2, smp2)\nsap2 = s2 + samp\nhsap2 = ps2h(p2, sap2)\n\nsmt2 = s2 - samp\nhsmt2 = ps2h(p1, smp1)\nsat2 = s2 + samp\nhsat2 = ts2h(t2, sap1)\n\npoint_p2_h = np.zeros(shape=3)\npoint_p2_h[0] = hsmp2\npoint_p2_h[1] = h2\npoint_p2_h[2] = hsap2\n\npoint_p2_s = np.zeros(shape=3)\npoint_p2_s[0] = smp2\npoint_p2_s[1] = s2\npoint_p2_s[2] = sap2\n\n# 等熵焓降\npoint_is_h = np.zeros(shape=2)\npoint_is_h[0] = h1\npoint_is_h[1] = h2s\npoint_is_s = np.zeros(shape=2)\npoint_is_s[0] = s1\npoint_is_s[1] = s1\n\n# HP Expansion Line\npoint_hp_h = np.zeros(shape=2)\npoint_hp_h[0] = h1\npoint_hp_h[1] = h2\npoint_hp_s = np.zeros(shape=2)\npoint_hp_s[0] = s1\npoint_hp_s[1] = s2\n\nplt.plot(point_p1_s, point_p1_h, 'bs-')\n\nplt.plot(point_p2_s, point_p2_h, 'bs-')\nplt.plot(point_is_s, point_is_h, 'ys-')\nplt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')\n\n_title = 'The isentropic efficiency = ' + \\\n r'$\\frac{h1-h2}{h1-h2s}$' + '=' + '{:.2f}'.format(ef) + '%'\n\nplt.legend(loc=\"best\", bbox_to_anchor=[0.5, 0.5],\n ncol=2, shadow=True, title=_title)\n\n# annotate some interesting points\nplt.annotate('(P1,T1)',\n xy=(s1, h1), xycoords='data',\n xytext=(+10, +30), textcoords='offset points', fontsize=12,\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3,rad=.2\"))\n\nplt.annotate('(P2,T2)',\n xy=(s2, h2), xycoords='data',\n xytext=(+10, +30), textcoords='offset points', fontsize=12,\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3,rad=.2\"))\n\nplt.xlabel('s(kJ/(kg.K))')\nplt.ylabel('h(kJ/kg)')\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
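The legend title in the record above encodes the isentropic efficiency eta = (h1 - h2) / (h1 - h2s); the '%' suffix suggests ief() returns it as a percentage. A standalone check of the formula with made-up enthalpies, so it runs without seuif97:

# Hypothetical enthalpies in kJ/kg, chosen only to exercise the formula.
h1, h2, h2s = 3400.0, 3000.0, 2950.0
ef = (h1 - h2) / (h1 - h2s) * 100   # actual enthalpy drop over isentropic drop
print('The isentropic efficiency = {:.2f}%'.format(ef))   # 88.89%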
<|reserved_special_token_0|>
class AccessFilter(BaseFilter):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class FilterTests(helper.CPWebCase):
def testCPFilterList(self):
self.getPage('/cpfilterlist/')
self.assertBody('A horrorshow lomtick of cherry 3.14159')
self.getPage('/cpfilterlist/ended/1')
self.assertBody('True')
valerr = '\n raise ValueError()\nValueError'
self.getPage('/cpfilterlist/err')
self.assertErrorPage(500, pattern=valerr)
self.getPage('/cpfilterlist/ended/3')
self.assertBody('True')
self.getPage('/cpfilterlist/errinstream')
self.assertStatus('200 OK')
self.assertBody('Unrecoverable error in the server.')
self.getPage('/cpfilterlist/ended/5')
self.assertBody('True')
self.getPage('/cpfilterlist/restricted')
self.assertErrorPage(401)
def testGuaranteedFilters(self):
self.getPage('/cpfilterlist/err_in_onstart')
self.assertErrorPage(500)
self.assertInBody(
"AttributeError: 'Request' object has no attribute 'numerify_map'")
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AccessFilter(BaseFilter):
def before_request_body(self):
if not cherrypy.config.get('access_filter.on', False):
return
if not getattr(cherrypy.request, 'login', None):
raise cherrypy.HTTPError(401)
<|reserved_special_token_0|>
class FilterTests(helper.CPWebCase):
def testCPFilterList(self):
self.getPage('/cpfilterlist/')
self.assertBody('A horrorshow lomtick of cherry 3.14159')
self.getPage('/cpfilterlist/ended/1')
self.assertBody('True')
valerr = '\n raise ValueError()\nValueError'
self.getPage('/cpfilterlist/err')
self.assertErrorPage(500, pattern=valerr)
self.getPage('/cpfilterlist/ended/3')
self.assertBody('True')
self.getPage('/cpfilterlist/errinstream')
self.assertStatus('200 OK')
self.assertBody('Unrecoverable error in the server.')
self.getPage('/cpfilterlist/ended/5')
self.assertBody('True')
self.getPage('/cpfilterlist/restricted')
self.assertErrorPage(401)
def testGuaranteedFilters(self):
self.getPage('/cpfilterlist/err_in_onstart')
self.assertErrorPage(500)
self.assertInBody(
"AttributeError: 'Request' object has no attribute 'numerify_map'")
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
test.prefer_parent_path()
<|reserved_special_token_0|>
class AccessFilter(BaseFilter):
def before_request_body(self):
if not cherrypy.config.get('access_filter.on', False):
return
if not getattr(cherrypy.request, 'login', None):
raise cherrypy.HTTPError(401)
def setup_server():
class Numerify(BaseFilter):
def on_start_resource(self):
m = cherrypy.config.get('numerify_filter.map', {})
cherrypy.request.numerify_map = m.items()
def before_finalize(self):
if not cherrypy.config.get('numerify_filter.on', False):
return
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
class NadsatFilter:
def __init__(self):
self.counter = 0
self.ended = {}
def before_main(self):
cherrypy.request.counter = self.counter = self.counter + 1
self.ended[cherrypy.request.counter] = False
def before_finalize(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace('good', 'horrorshow')
chunk = chunk.replace('piece', 'lomtick')
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
def on_end_request(self):
cherrypy.response.body = 'razdrez'
self.ended[cherrypy.request.counter] = True
class Root:
def index(self):
return 'Howdy earth!'
index.exposed = True
cherrypy.root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each subclass,
and adds an instance of the subclass as an attribute of cherrypy.root.
"""
def __init__(cls, name, bases, dct):
type.__init__(name, bases, dct)
for value in dct.itervalues():
if isinstance(value, types.FunctionType):
value.exposed = True
setattr(cherrypy.root, name.lower(), cls())
class Test(object):
__metaclass__ = TestType
class CPFilterList(Test):
_cp_filters = [NadsatFilter()]
def index(self):
return 'A good piece of cherry pie'
def ended(self, id):
return repr(self._cp_filters[0].ended[int(id)])
def err(self):
raise ValueError()
def errinstream(self):
raise ValueError()
yield 'confidential'
def restricted(self):
return 'Welcome!'
def err_in_onstart(self):
return 'success!'
cherrypy.config.update({'global': {'server.input_filters': [
'cherrypy.test.test_custom_filters.AccessFilter'],
'server.log_to_screen': False, 'server.environment': 'production',
'server.show_tracebacks': True}, '/cpfilterlist': {
'numerify_filter.on': True, 'numerify_filter.map': {'pie':
'3.14159'}}, '/cpfilterlist/restricted': {'access_filter.on': True,
'server.show_tracebacks': False}, '/cpfilterlist/errinstream': {
'stream_response': True}, '/cpfilterlist/err_in_onstart': {
'numerify_filter.map': 'pie->3.14159'}})
filters.input_filters.insert(0, Numerify)
filters.output_filters.insert(0, Numerify)
filters.init()
<|reserved_special_token_0|>
class FilterTests(helper.CPWebCase):
def testCPFilterList(self):
self.getPage('/cpfilterlist/')
self.assertBody('A horrorshow lomtick of cherry 3.14159')
self.getPage('/cpfilterlist/ended/1')
self.assertBody('True')
valerr = '\n raise ValueError()\nValueError'
self.getPage('/cpfilterlist/err')
self.assertErrorPage(500, pattern=valerr)
self.getPage('/cpfilterlist/ended/3')
self.assertBody('True')
self.getPage('/cpfilterlist/errinstream')
self.assertStatus('200 OK')
self.assertBody('Unrecoverable error in the server.')
self.getPage('/cpfilterlist/ended/5')
self.assertBody('True')
self.getPage('/cpfilterlist/restricted')
self.assertErrorPage(401)
def testGuaranteedFilters(self):
self.getPage('/cpfilterlist/err_in_onstart')
self.assertErrorPage(500)
self.assertInBody(
"AttributeError: 'Request' object has no attribute 'numerify_map'")
if __name__ == '__main__':
setup_server()
helper.testmain()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import types
import test
test.prefer_parent_path()
import cherrypy
from cherrypy import filters
from cherrypy.filters.basefilter import BaseFilter
class AccessFilter(BaseFilter):
def before_request_body(self):
if not cherrypy.config.get('access_filter.on', False):
return
if not getattr(cherrypy.request, 'login', None):
raise cherrypy.HTTPError(401)
def setup_server():
class Numerify(BaseFilter):
def on_start_resource(self):
m = cherrypy.config.get('numerify_filter.map', {})
cherrypy.request.numerify_map = m.items()
def before_finalize(self):
if not cherrypy.config.get('numerify_filter.on', False):
return
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
class NadsatFilter:
def __init__(self):
self.counter = 0
self.ended = {}
def before_main(self):
cherrypy.request.counter = self.counter = self.counter + 1
self.ended[cherrypy.request.counter] = False
def before_finalize(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace('good', 'horrorshow')
chunk = chunk.replace('piece', 'lomtick')
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
def on_end_request(self):
cherrypy.response.body = 'razdrez'
self.ended[cherrypy.request.counter] = True
class Root:
def index(self):
return 'Howdy earth!'
index.exposed = True
cherrypy.root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each subclass,
and adds an instance of the subclass as an attribute of cherrypy.root.
"""
def __init__(cls, name, bases, dct):
type.__init__(name, bases, dct)
for value in dct.itervalues():
if isinstance(value, types.FunctionType):
value.exposed = True
setattr(cherrypy.root, name.lower(), cls())
class Test(object):
__metaclass__ = TestType
class CPFilterList(Test):
_cp_filters = [NadsatFilter()]
def index(self):
return 'A good piece of cherry pie'
def ended(self, id):
return repr(self._cp_filters[0].ended[int(id)])
def err(self):
raise ValueError()
def errinstream(self):
raise ValueError()
yield 'confidential'
def restricted(self):
return 'Welcome!'
def err_in_onstart(self):
return 'success!'
cherrypy.config.update({'global': {'server.input_filters': [
'cherrypy.test.test_custom_filters.AccessFilter'],
'server.log_to_screen': False, 'server.environment': 'production',
'server.show_tracebacks': True}, '/cpfilterlist': {
'numerify_filter.on': True, 'numerify_filter.map': {'pie':
'3.14159'}}, '/cpfilterlist/restricted': {'access_filter.on': True,
'server.show_tracebacks': False}, '/cpfilterlist/errinstream': {
'stream_response': True}, '/cpfilterlist/err_in_onstart': {
'numerify_filter.map': 'pie->3.14159'}})
filters.input_filters.insert(0, Numerify)
filters.output_filters.insert(0, Numerify)
filters.init()
import helper
class FilterTests(helper.CPWebCase):
def testCPFilterList(self):
self.getPage('/cpfilterlist/')
self.assertBody('A horrorshow lomtick of cherry 3.14159')
self.getPage('/cpfilterlist/ended/1')
self.assertBody('True')
valerr = '\n raise ValueError()\nValueError'
self.getPage('/cpfilterlist/err')
self.assertErrorPage(500, pattern=valerr)
self.getPage('/cpfilterlist/ended/3')
self.assertBody('True')
self.getPage('/cpfilterlist/errinstream')
self.assertStatus('200 OK')
self.assertBody('Unrecoverable error in the server.')
self.getPage('/cpfilterlist/ended/5')
self.assertBody('True')
self.getPage('/cpfilterlist/restricted')
self.assertErrorPage(401)
def testGuaranteedFilters(self):
self.getPage('/cpfilterlist/err_in_onstart')
self.assertErrorPage(500)
self.assertInBody(
"AttributeError: 'Request' object has no attribute 'numerify_map'")
if __name__ == '__main__':
setup_server()
helper.testmain()
<|reserved_special_token_1|>
"""Test the various means of instantiating and invoking filters."""
import types
import test
test.prefer_parent_path()
import cherrypy
from cherrypy import filters
from cherrypy.filters.basefilter import BaseFilter
class AccessFilter(BaseFilter):
def before_request_body(self):
if not cherrypy.config.get("access_filter.on", False):
return
if not getattr(cherrypy.request, "login", None):
raise cherrypy.HTTPError(401)
def setup_server():
class Numerify(BaseFilter):
def on_start_resource(self):
m = cherrypy.config.get("numerify_filter.map", {})
cherrypy.request.numerify_map = m.items()
def before_finalize(self):
if not cherrypy.config.get("numerify_filter.on", False):
return
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
# It's not mandatory to inherit from BaseFilter.
class NadsatFilter:
def __init__(self):
self.counter = 0
self.ended = {}
def before_main(self):
cherrypy.request.counter = self.counter = self.counter + 1
self.ended[cherrypy.request.counter] = False
def before_finalize(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace("good", "horrorshow")
chunk = chunk.replace("piece", "lomtick")
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
def on_end_request(self):
# This runs after the request has been completely written out.
cherrypy.response.body = "razdrez"
self.ended[cherrypy.request.counter] = True
class Root:
def index(self):
return "Howdy earth!"
index.exposed = True
cherrypy.root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each subclass,
and adds an instance of the subclass as an attribute of cherrypy.root.
"""
def __init__(cls, name, bases, dct):
type.__init__(name, bases, dct)
for value in dct.itervalues():
if isinstance(value, types.FunctionType):
value.exposed = True
setattr(cherrypy.root, name.lower(), cls())
class Test(object):
__metaclass__ = TestType
class CPFilterList(Test):
# METHOD ONE:
# Use _cp_filters (old name: _cpFilterList)
_cp_filters = [NadsatFilter()]
def index(self):
return "A good piece of cherry pie"
def ended(self, id):
return repr(self._cp_filters[0].ended[int(id)])
def err(self):
raise ValueError()
def errinstream(self):
raise ValueError()
yield "confidential"
def restricted(self):
return "Welcome!"
def err_in_onstart(self):
return "success!"
cherrypy.config.update({
'global': {
# METHOD TWO:
# Declare a classname in server.input_filters.
'server.input_filters': ["cherrypy.test.test_custom_filters.AccessFilter"],
'server.log_to_screen': False,
'server.environment': 'production',
'server.show_tracebacks': True,
},
'/cpfilterlist': {
'numerify_filter.on': True,
'numerify_filter.map': {"pie": "3.14159"}
},
'/cpfilterlist/restricted': {
'access_filter.on': True,
'server.show_tracebacks': False,
},
'/cpfilterlist/errinstream': {
'stream_response': True,
},
'/cpfilterlist/err_in_onstart': {
# Because this isn't a dict, on_start_resource will error.
'numerify_filter.map': "pie->3.14159"
},
})
# METHOD THREE:
# Insert a class directly into the filters.output_filters chain.
# You can also insert a string, but we're effectively testing
# using-a-string via the config file.
filters.input_filters.insert(0, Numerify)
filters.output_filters.insert(0, Numerify)
# We have to call filters.init() here (if we want methods #2 and #3
# to work), because the test suite may already have run server.start()
# (which is where filters.init() is usually called).
filters.init()
# Client-side code #
import helper
class FilterTests(helper.CPWebCase):
def testCPFilterList(self):
self.getPage("/cpfilterlist/")
# If body is "razdrez", then on_end_request is being called too early.
self.assertBody("A horrorshow lomtick of cherry 3.14159")
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/1")
self.assertBody("True")
valerr = '\n raise ValueError()\nValueError'
self.getPage("/cpfilterlist/err")
# If body is "razdrez", then on_end_request is being called too early.
self.assertErrorPage(500, pattern=valerr)
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/3")
self.assertBody("True")
# If body is "razdrez", then on_end_request is being called too early.
self.getPage("/cpfilterlist/errinstream")
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus("200 OK")
self.assertBody("Unrecoverable error in the server.")
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/5")
self.assertBody("True")
# Test the config method.
self.getPage("/cpfilterlist/restricted")
self.assertErrorPage(401)
def testGuaranteedFilters(self):
# The on_start_resource and on_end_request filter methods are all
# guaranteed to run, even if there are failures in other on_start
# or on_end methods. This is NOT true of the other filter methods.
# Here, we have set up a failure in NumerifyFilter.on_start_resource,
# but because that failure is logged and passed over, the error
# page we obtain in the user agent should be from before_finalize.
self.getPage("/cpfilterlist/err_in_onstart")
self.assertErrorPage(500)
self.assertInBody("AttributeError: 'Request' object has no "
"attribute 'numerify_map'")
if __name__ == '__main__':
setup_server()
helper.testmain()
|
flexible
|
{
"blob_id": "8a412231c13df1b364b6e2a27549730d06048186",
"index": 9978,
"step-1": "<mask token>\n\n\nclass AccessFilter(BaseFilter):\n <mask token>\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\n<mask token>\n",
"step-3": "<mask token>\ntest.prefer_parent_path()\n<mask token>\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n\n class Numerify(BaseFilter):\n\n def on_start_resource(self):\n m = cherrypy.config.get('numerify_filter.map', {})\n cherrypy.request.numerify_map = m.items()\n\n def before_finalize(self):\n if not cherrypy.config.get('numerify_filter.on', False):\n return\n\n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n\n\n class NadsatFilter:\n\n def __init__(self):\n self.counter = 0\n self.ended = {}\n\n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n\n def before_finalize(self):\n\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace('good', 'horrorshow')\n chunk = chunk.replace('piece', 'lomtick')\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n\n def on_end_request(self):\n cherrypy.response.body = 'razdrez'\n self.ended[cherrypy.request.counter] = True\n\n\n class Root:\n\n def index(self):\n return 'Howdy earth!'\n index.exposed = True\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n\n\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n _cp_filters = [NadsatFilter()]\n\n def index(self):\n return 'A good piece of cherry pie'\n\n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n\n def err(self):\n raise ValueError()\n\n def errinstream(self):\n raise ValueError()\n yield 'confidential'\n\n def restricted(self):\n return 'Welcome!'\n\n def err_in_onstart(self):\n return 'success!'\n cherrypy.config.update({'global': {'server.input_filters': [\n 'cherrypy.test.test_custom_filters.AccessFilter'],\n 'server.log_to_screen': False, 'server.environment': 'production',\n 'server.show_tracebacks': True}, '/cpfilterlist': {\n 'numerify_filter.on': True, 'numerify_filter.map': {'pie':\n '3.14159'}}, '/cpfilterlist/restricted': {'access_filter.on': True,\n 'server.show_tracebacks': False}, '/cpfilterlist/errinstream': {\n 'stream_response': True}, '/cpfilterlist/err_in_onstart': {\n 'numerify_filter.map': 'pie->3.14159'}})\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n filters.init()\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the 
server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n",
"step-4": "<mask token>\nimport types\nimport test\ntest.prefer_parent_path()\nimport cherrypy\nfrom cherrypy import filters\nfrom cherrypy.filters.basefilter import BaseFilter\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n\n class Numerify(BaseFilter):\n\n def on_start_resource(self):\n m = cherrypy.config.get('numerify_filter.map', {})\n cherrypy.request.numerify_map = m.items()\n\n def before_finalize(self):\n if not cherrypy.config.get('numerify_filter.on', False):\n return\n\n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n\n\n class NadsatFilter:\n\n def __init__(self):\n self.counter = 0\n self.ended = {}\n\n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n\n def before_finalize(self):\n\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace('good', 'horrorshow')\n chunk = chunk.replace('piece', 'lomtick')\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n\n def on_end_request(self):\n cherrypy.response.body = 'razdrez'\n self.ended[cherrypy.request.counter] = True\n\n\n class Root:\n\n def index(self):\n return 'Howdy earth!'\n index.exposed = True\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n\n\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n _cp_filters = [NadsatFilter()]\n\n def index(self):\n return 'A good piece of cherry pie'\n\n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n\n def err(self):\n raise ValueError()\n\n def errinstream(self):\n raise ValueError()\n yield 'confidential'\n\n def restricted(self):\n return 'Welcome!'\n\n def err_in_onstart(self):\n return 'success!'\n cherrypy.config.update({'global': {'server.input_filters': [\n 'cherrypy.test.test_custom_filters.AccessFilter'],\n 'server.log_to_screen': False, 'server.environment': 'production',\n 'server.show_tracebacks': True}, '/cpfilterlist': {\n 'numerify_filter.on': True, 'numerify_filter.map': {'pie':\n '3.14159'}}, '/cpfilterlist/restricted': {'access_filter.on': True,\n 'server.show_tracebacks': False}, '/cpfilterlist/errinstream': {\n 'stream_response': True}, '/cpfilterlist/err_in_onstart': {\n 'numerify_filter.map': 'pie->3.14159'}})\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n filters.init()\n\n\nimport helper\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n 
self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n",
"step-5": "\"\"\"Test the various means of instantiating and invoking filters.\"\"\"\n\nimport types\nimport test\ntest.prefer_parent_path()\n\nimport cherrypy\nfrom cherrypy import filters\nfrom cherrypy.filters.basefilter import BaseFilter\n\n\nclass AccessFilter(BaseFilter):\n \n def before_request_body(self):\n if not cherrypy.config.get(\"access_filter.on\", False):\n return\n \n if not getattr(cherrypy.request, \"login\", None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n class Numerify(BaseFilter):\n \n def on_start_resource(self):\n m = cherrypy.config.get(\"numerify_filter.map\", {})\n cherrypy.request.numerify_map = m.items()\n \n def before_finalize(self):\n if not cherrypy.config.get(\"numerify_filter.on\", False):\n return\n \n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n \n \n # It's not mandatory to inherit from BaseFilter.\n class NadsatFilter:\n \n def __init__(self):\n self.counter = 0\n self.ended = {}\n \n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n \n def before_finalize(self):\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace(\"good\", \"horrorshow\")\n chunk = chunk.replace(\"piece\", \"lomtick\")\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n \n def on_end_request(self):\n # This runs after the request has been completely written out.\n cherrypy.response.body = \"razdrez\"\n self.ended[cherrypy.request.counter] = True\n\n\n\n class Root:\n def index(self):\n return \"Howdy earth!\"\n index.exposed = True\n\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n \n # METHOD ONE:\n # Use _cp_filters (old name: _cpFilterList)\n _cp_filters = [NadsatFilter()]\n \n def index(self):\n return \"A good piece of cherry pie\"\n \n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n \n def err(self):\n raise ValueError()\n \n def errinstream(self):\n raise ValueError()\n yield \"confidential\"\n \n def restricted(self):\n return \"Welcome!\"\n \n def err_in_onstart(self):\n return \"success!\"\n\n\n cherrypy.config.update({\n 'global': {\n # METHOD TWO:\n # Declare a classname in server.input_filters.\n 'server.input_filters': [\"cherrypy.test.test_custom_filters.AccessFilter\"],\n 'server.log_to_screen': False,\n 'server.environment': 'production',\n 'server.show_tracebacks': True,\n },\n '/cpfilterlist': {\n 'numerify_filter.on': True,\n 'numerify_filter.map': {\"pie\": \"3.14159\"}\n },\n '/cpfilterlist/restricted': {\n 'access_filter.on': True,\n 'server.show_tracebacks': False,\n },\n '/cpfilterlist/errinstream': {\n 'stream_response': True,\n },\n '/cpfilterlist/err_in_onstart': {\n # Because this isn't a dict, on_start_resource will error.\n 'numerify_filter.map': \"pie->3.14159\"\n },\n })\n\n # METHOD THREE:\n # Insert a class directly into the filters.output_filters chain.\n # You can also 
insert a string, but we're effectively testing\n # using-a-string via the config file.\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n\n # We have to call filters.init() here (if we want methods #2 and #3\n # to work), because the test suite may already have run server.start()\n # (which is where filters.init() is usually called).\n filters.init()\n\n\n# Client-side code #\n\nimport helper\n\n\nclass FilterTests(helper.CPWebCase):\n \n def testCPFilterList(self):\n self.getPage(\"/cpfilterlist/\")\n # If body is \"razdrez\", then on_end_request is being called too early.\n self.assertBody(\"A horrorshow lomtick of cherry 3.14159\")\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/1\")\n self.assertBody(\"True\")\n \n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage(\"/cpfilterlist/err\")\n # If body is \"razdrez\", then on_end_request is being called too early.\n self.assertErrorPage(500, pattern=valerr)\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/3\")\n self.assertBody(\"True\")\n \n # If body is \"razdrez\", then on_end_request is being called too early.\n self.getPage(\"/cpfilterlist/errinstream\")\n # Because this error is raised after the response body has\n # started, the status should not change to an error status.\n self.assertStatus(\"200 OK\")\n self.assertBody(\"Unrecoverable error in the server.\")\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/5\")\n self.assertBody(\"True\")\n \n # Test the config method.\n self.getPage(\"/cpfilterlist/restricted\")\n self.assertErrorPage(401)\n \n def testGuaranteedFilters(self):\n # The on_start_resource and on_end_request filter methods are all\n # guaranteed to run, even if there are failures in other on_start\n # or on_end methods. This is NOT true of the other filter methods.\n # Here, we have set up a failure in NumerifyFilter.on_start_resource,\n # but because that failure is logged and passed over, the error\n # page we obtain in the user agent should be from before_finalize.\n self.getPage(\"/cpfilterlist/err_in_onstart\")\n self.assertErrorPage(500)\n self.assertInBody(\"AttributeError: 'Request' object has no \"\n \"attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
class RequirementSerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description',
'detailed_requirements']
class OfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = '__all__'
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ['id', 'name']
class FieldOfStudySerializer(serializers.ModelSerializer):
class Meta:
model = FieldOfStudy
fields = '__all__'
class IndicatorFactorSerializer(serializers.ModelSerializer):
class Meta:
model = IndicatorFactor
fields = '__all__'
class BasisTemplateSerializer(serializers.ModelSerializer):
class Meta:
model = BasisTemplate
fields = '__all__'
class ReadIndicatorFactorSerializer(serializers.ModelSerializer):
offering_course = SimpleOfferingCourseSerializer()
field_of_study = FieldOfStudySerializer()
basis_templates = BasisTemplateSerializer(many=True)
rough_requirement = serializers.IntegerField(source=
'detailed_requirement.rough_requirement.id')
detailed_index = serializers.IntegerField(source=
'detailed_requirement.index')
rough_index = serializers.IntegerField(source=
'detailed_requirement.rough_requirement.index')
detailed_description = serializers.CharField(source=
'detailed_requirement.description')
rough_description = serializers.CharField(source=
'detailed_requirement.rough_requirement.description')
rough_title = serializers.CharField(source=
'detailed_requirement.rough_requirement.title')
class Meta:
model = IndicatorFactor
fields = '__all__'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RoughRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description']
class DetailedRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = DetailedRequirement
fields = ['id', 'index', 'description', 'indicator_warning_line',
'rough_requirement']
class RequirementSerializer(serializers.ModelSerializer):
detailed_requirements = DetailedRequirementSerializer(many=True)
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description',
'detailed_requirements']
class OfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = '__all__'
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ['id', 'name']
class FieldOfStudySerializer(serializers.ModelSerializer):
class Meta:
model = FieldOfStudy
fields = '__all__'
class IndicatorFactorSerializer(serializers.ModelSerializer):
class Meta:
model = IndicatorFactor
fields = '__all__'
class BasisTemplateSerializer(serializers.ModelSerializer):
class Meta:
model = BasisTemplate
fields = '__all__'
class ReadIndicatorFactorSerializer(serializers.ModelSerializer):
offering_course = SimpleOfferingCourseSerializer()
field_of_study = FieldOfStudySerializer()
basis_templates = BasisTemplateSerializer(many=True)
rough_requirement = serializers.IntegerField(source=
'detailed_requirement.rough_requirement.id')
detailed_index = serializers.IntegerField(source=
'detailed_requirement.index')
rough_index = serializers.IntegerField(source=
'detailed_requirement.rough_requirement.index')
detailed_description = serializers.CharField(source=
'detailed_requirement.description')
rough_description = serializers.CharField(source=
'detailed_requirement.rough_requirement.description')
rough_title = serializers.CharField(source=
'detailed_requirement.rough_requirement.title')
class Meta:
model = IndicatorFactor
fields = '__all__'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ['id', 'name']
extra_kwargs = {'name': {'required': False}}
class RoughRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description']
class DetailedRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = DetailedRequirement
fields = ['id', 'index', 'description', 'indicator_warning_line',
'rough_requirement']
class RequirementSerializer(serializers.ModelSerializer):
detailed_requirements = DetailedRequirementSerializer(many=True)
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description',
'detailed_requirements']
class OfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = '__all__'
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ['id', 'name']
class FieldOfStudySerializer(serializers.ModelSerializer):
class Meta:
model = FieldOfStudy
fields = '__all__'
class IndicatorFactorSerializer(serializers.ModelSerializer):
class Meta:
model = IndicatorFactor
fields = '__all__'
class BasisTemplateSerializer(serializers.ModelSerializer):
class Meta:
model = BasisTemplate
fields = '__all__'
class ReadIndicatorFactorSerializer(serializers.ModelSerializer):
offering_course = SimpleOfferingCourseSerializer()
field_of_study = FieldOfStudySerializer()
basis_templates = BasisTemplateSerializer(many=True)
rough_requirement = serializers.IntegerField(source=
'detailed_requirement.rough_requirement.id')
detailed_index = serializers.IntegerField(source=
'detailed_requirement.index')
rough_index = serializers.IntegerField(source=
'detailed_requirement.rough_requirement.index')
detailed_description = serializers.CharField(source=
'detailed_requirement.description')
rough_description = serializers.CharField(source=
'detailed_requirement.rough_requirement.description')
rough_title = serializers.CharField(source=
'detailed_requirement.rough_requirement.title')
class Meta:
model = IndicatorFactor
fields = '__all__'
<|reserved_special_token_1|>
from rest_framework import serializers
from plan.models import RoughRequirement, DetailedRequirement
from plan.models import OfferingCourse, FieldOfStudy, IndicatorFactor
from plan.models import BasisTemplate
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ['id', 'name']
extra_kwargs = {'name': {'required': False}}
class RoughRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description']
class DetailedRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = DetailedRequirement
fields = ['id', 'index', 'description', 'indicator_warning_line',
'rough_requirement']
class RequirementSerializer(serializers.ModelSerializer):
detailed_requirements = DetailedRequirementSerializer(many=True)
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description',
'detailed_requirements']
class OfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = '__all__'
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ['id', 'name']
class FieldOfStudySerializer(serializers.ModelSerializer):
class Meta:
model = FieldOfStudy
fields = '__all__'
class IndicatorFactorSerializer(serializers.ModelSerializer):
class Meta:
model = IndicatorFactor
fields = '__all__'
class BasisTemplateSerializer(serializers.ModelSerializer):
class Meta:
model = BasisTemplate
fields = '__all__'
class ReadIndicatorFactorSerializer(serializers.ModelSerializer):
offering_course = SimpleOfferingCourseSerializer()
field_of_study = FieldOfStudySerializer()
basis_templates = BasisTemplateSerializer(many=True)
rough_requirement = serializers.IntegerField(source=
'detailed_requirement.rough_requirement.id')
detailed_index = serializers.IntegerField(source=
'detailed_requirement.index')
rough_index = serializers.IntegerField(source=
'detailed_requirement.rough_requirement.index')
detailed_description = serializers.CharField(source=
'detailed_requirement.description')
rough_description = serializers.CharField(source=
'detailed_requirement.rough_requirement.description')
rough_title = serializers.CharField(source=
'detailed_requirement.rough_requirement.title')
class Meta:
model = IndicatorFactor
fields = '__all__'
<|reserved_special_token_1|>
from rest_framework import serializers
from plan.models import RoughRequirement, DetailedRequirement
from plan.models import OfferingCourse, FieldOfStudy, IndicatorFactor
from plan.models import BasisTemplate
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ['id', 'name']
extra_kwargs = {'name': {'required': False}}
class RoughRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description']
class DetailedRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = DetailedRequirement
fields = ['id', 'index', 'description', 'indicator_warning_line', 'rough_requirement']
class RequirementSerializer(serializers.ModelSerializer):
detailed_requirements = DetailedRequirementSerializer(many=True)
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description', 'detailed_requirements']
class OfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = '__all__'
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ["id", "name"]
class FieldOfStudySerializer(serializers.ModelSerializer):
class Meta:
model = FieldOfStudy
fields = '__all__'
class IndicatorFactorSerializer(serializers.ModelSerializer):
class Meta:
model = IndicatorFactor
fields = '__all__'
class BasisTemplateSerializer(serializers.ModelSerializer):
class Meta:
model = BasisTemplate
fields = '__all__'
class ReadIndicatorFactorSerializer(serializers.ModelSerializer):
offering_course = SimpleOfferingCourseSerializer()
field_of_study = FieldOfStudySerializer()
basis_templates = BasisTemplateSerializer(many=True)
rough_requirement = serializers.IntegerField(source='detailed_requirement.rough_requirement.id')
detailed_index = serializers.IntegerField(source='detailed_requirement.index')
rough_index = serializers.IntegerField(source='detailed_requirement.rough_requirement.index')
detailed_description = serializers.CharField(source='detailed_requirement.description')
rough_description = serializers.CharField(source='detailed_requirement.rough_requirement.description')
rough_title = serializers.CharField(source='detailed_requirement.rough_requirement.title')
class Meta:
model = IndicatorFactor
fields = '__all__'
|
flexible
|
{
"blob_id": "596f7dfacc931f5e756c71b8622f4001df19934b",
"index": 5964,
"step-1": "<mask token>\n\n\nclass RequirementSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description',\n 'detailed_requirements']\n\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source=\n 'detailed_requirement.index')\n rough_index = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source=\n 'detailed_requirement.description')\n rough_description = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.title')\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass RoughRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description']\n\n\nclass DetailedRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = DetailedRequirement\n fields = ['id', 'index', 'description', 'indicator_warning_line',\n 'rough_requirement']\n\n\nclass RequirementSerializer(serializers.ModelSerializer):\n detailed_requirements = DetailedRequirementSerializer(many=True)\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description',\n 'detailed_requirements']\n\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source=\n 'detailed_requirement.index')\n rough_index = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source=\n 'detailed_requirement.description')\n rough_description = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.title')\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n extra_kwargs = {'name': {'required': False}}\n\n\nclass RoughRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description']\n\n\nclass DetailedRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = DetailedRequirement\n fields = ['id', 'index', 'description', 'indicator_warning_line',\n 'rough_requirement']\n\n\nclass RequirementSerializer(serializers.ModelSerializer):\n detailed_requirements = DetailedRequirementSerializer(many=True)\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description',\n 'detailed_requirements']\n\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source=\n 'detailed_requirement.index')\n rough_index = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source=\n 'detailed_requirement.description')\n rough_description = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.title')\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n",
"step-4": "from rest_framework import serializers\nfrom plan.models import RoughRequirement, DetailedRequirement\nfrom plan.models import OfferingCourse, FieldOfStudy, IndicatorFactor\nfrom plan.models import BasisTemplate\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n extra_kwargs = {'name': {'required': False}}\n\n\nclass RoughRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description']\n\n\nclass DetailedRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = DetailedRequirement\n fields = ['id', 'index', 'description', 'indicator_warning_line',\n 'rough_requirement']\n\n\nclass RequirementSerializer(serializers.ModelSerializer):\n detailed_requirements = DetailedRequirementSerializer(many=True)\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description',\n 'detailed_requirements']\n\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source=\n 'detailed_requirement.index')\n rough_index = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source=\n 'detailed_requirement.description')\n rough_description = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.title')\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n",
"step-5": "from rest_framework import serializers\nfrom plan.models import RoughRequirement, DetailedRequirement\nfrom plan.models import OfferingCourse, FieldOfStudy, IndicatorFactor\nfrom plan.models import BasisTemplate\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n extra_kwargs = {'name': {'required': False}}\n\nclass RoughRequirementSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description']\n\nclass DetailedRequirementSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = DetailedRequirement\n fields = ['id', 'index', 'description', 'indicator_warning_line', 'rough_requirement']\n\nclass RequirementSerializer(serializers.ModelSerializer):\n detailed_requirements = DetailedRequirementSerializer(many=True)\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description', 'detailed_requirements']\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n class Meta:\n model = OfferingCourse\n fields = [\"id\", \"name\"]\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source='detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source='detailed_requirement.index')\n rough_index = serializers.IntegerField(source='detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source='detailed_requirement.description')\n rough_description = serializers.CharField(source='detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source='detailed_requirement.rough_requirement.title')\n class Meta:\n model = IndicatorFactor\n fields = '__all__'",
"step-ids": [
8,
11,
12,
13,
14
]
}
|
[
8,
11,
12,
13,
14
] |
from typing import List
class NURBS:
def __init__(self, degree: int) -> None:
self._degree = degree
self._points = [] # type: List[complex]
self._weights = [] # type: List[float]
self._knots = [] # type: List[float]
def addPoint(self, p: complex) -> None:
self._points.append(p)
def addKnot(self, knot: float) -> None:
self._knots.append(knot)
def pointCount(self) -> int:
return len(self._points)
def calculate(self, segments: int) -> List[complex]:
while len(self._weights) < len(self._points):
self._weights.append(1.0)
ret = []
for n in range(0, segments):
u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (segments - 1)
nku = []
for m in range(0, len(self._points)):
nku.append(self._weights[m] * self._N(m, self._degree, u))
point = complex(0, 0)
denom = sum(nku)
for m in range(0, len(self._points)):
if nku[m] != 0.0 and denom != 0.0:
r_iku = nku[m] / denom
if r_iku != 0.0:
point += self._points[m] * r_iku
ret.append(point)
return ret
def _N(self, i: int, n: int, u: float) -> float:
if n == 0:
if self._knots[i] <= u <= self._knots[i+1]:
return 1
return 0
else:
Nin1u = self._N(i, n - 1, u)
Ni1n1u = self._N(i + 1, n - 1, u)
if Nin1u == 0.0:
a = 0.0
else:
a = self._F(i, n, u) * Nin1u
if Ni1n1u == 0.0:
b = 0.0
else:
b = self._G(i, n, u) * Ni1n1u
return a + b
def _F(self, i: int, n: int, u: float) -> float:
denom = self._knots[i + n] - self._knots[i]
if denom == 0.0:
return 0.0
return (u - self._knots[i]) / denom
def _G(self, i: int, n: int, u: float) -> float:
denom = self._knots[i + n + 1] - self._knots[i]
if denom == 0:
return 0.0
return (self._knots[i + n + 1] - u) / denom
|
normal
|
{
"blob_id": "40b3cacf55f6c5056c3541d70d8b2c0e2cc7d01b",
"index": 2564,
"step-1": "<mask token>\n\n\nclass NURBS:\n <mask token>\n <mask token>\n\n def addKnot(self, knot: float) ->None:\n self._knots.append(knot)\n\n def pointCount(self) ->int:\n return len(self._points)\n <mask token>\n\n def _N(self, i: int, n: int, u: float) ->float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i + 1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-2": "<mask token>\n\n\nclass NURBS:\n <mask token>\n <mask token>\n\n def addKnot(self, knot: float) ->None:\n self._knots.append(knot)\n\n def pointCount(self) ->int:\n return len(self._points)\n\n def calculate(self, segments: int) ->List[complex]:\n while len(self._weights) < len(self._points):\n self._weights.append(1.0)\n ret = []\n for n in range(0, segments):\n u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (\n segments - 1)\n nku = []\n for m in range(0, len(self._points)):\n nku.append(self._weights[m] * self._N(m, self._degree, u))\n point = complex(0, 0)\n denom = sum(nku)\n for m in range(0, len(self._points)):\n if nku[m] != 0.0 and denom != 0.0:\n r_iku = nku[m] / denom\n if r_iku != 0.0:\n point += self._points[m] * r_iku\n ret.append(point)\n return ret\n\n def _N(self, i: int, n: int, u: float) ->float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i + 1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-3": "<mask token>\n\n\nclass NURBS:\n\n def __init__(self, degree: int) ->None:\n self._degree = degree\n self._points = []\n self._weights = []\n self._knots = []\n <mask token>\n\n def addKnot(self, knot: float) ->None:\n self._knots.append(knot)\n\n def pointCount(self) ->int:\n return len(self._points)\n\n def calculate(self, segments: int) ->List[complex]:\n while len(self._weights) < len(self._points):\n self._weights.append(1.0)\n ret = []\n for n in range(0, segments):\n u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (\n segments - 1)\n nku = []\n for m in range(0, len(self._points)):\n nku.append(self._weights[m] * self._N(m, self._degree, u))\n point = complex(0, 0)\n denom = sum(nku)\n for m in range(0, len(self._points)):\n if nku[m] != 0.0 and denom != 0.0:\n r_iku = nku[m] / denom\n if r_iku != 0.0:\n point += self._points[m] * r_iku\n ret.append(point)\n return ret\n\n def _N(self, i: int, n: int, u: float) ->float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i + 1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-4": "<mask token>\n\n\nclass NURBS:\n\n def __init__(self, degree: int) ->None:\n self._degree = degree\n self._points = []\n self._weights = []\n self._knots = []\n\n def addPoint(self, p: complex) ->None:\n self._points.append(p)\n\n def addKnot(self, knot: float) ->None:\n self._knots.append(knot)\n\n def pointCount(self) ->int:\n return len(self._points)\n\n def calculate(self, segments: int) ->List[complex]:\n while len(self._weights) < len(self._points):\n self._weights.append(1.0)\n ret = []\n for n in range(0, segments):\n u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (\n segments - 1)\n nku = []\n for m in range(0, len(self._points)):\n nku.append(self._weights[m] * self._N(m, self._degree, u))\n point = complex(0, 0)\n denom = sum(nku)\n for m in range(0, len(self._points)):\n if nku[m] != 0.0 and denom != 0.0:\n r_iku = nku[m] / denom\n if r_iku != 0.0:\n point += self._points[m] * r_iku\n ret.append(point)\n return ret\n\n def _N(self, i: int, n: int, u: float) ->float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i + 1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-5": "from typing import List\n\n\nclass NURBS:\n def __init__(self, degree: int) -> None:\n self._degree = degree\n self._points = [] # type: List[complex]\n self._weights = [] # type: List[float]\n self._knots = [] # type: List[float]\n\n def addPoint(self, p: complex) -> None:\n self._points.append(p)\n\n def addKnot(self, knot: float) -> None:\n self._knots.append(knot)\n\n def pointCount(self) -> int:\n return len(self._points)\n\n def calculate(self, segments: int) -> List[complex]:\n while len(self._weights) < len(self._points):\n self._weights.append(1.0)\n\n ret = []\n for n in range(0, segments):\n u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (segments - 1)\n nku = []\n for m in range(0, len(self._points)):\n nku.append(self._weights[m] * self._N(m, self._degree, u))\n\n point = complex(0, 0)\n denom = sum(nku)\n for m in range(0, len(self._points)):\n if nku[m] != 0.0 and denom != 0.0:\n r_iku = nku[m] / denom\n if r_iku != 0.0:\n point += self._points[m] * r_iku\n\n ret.append(point)\n return ret\n\n def _N(self, i: int, n: int, u: float) -> float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i+1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) -> float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) -> float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
# Generates an infinite series of odd numbers
def odds():
n = 1
while True:
yield n
n += 2
def pi_series():
odd_nums = odds()
approximation = 0
while True:
approximation += (4 / next(odd_nums))
yield approximation
approximation -= (4 / next(odd_nums))
yield approximation
approx_pi = pi_series()
# The higher the range used here, the more accurate the approximation of pi.
for x in range(10000):
print(next(approx_pi))
|
normal
|
{
"blob_id": "26ef7de89e2e38c419310cc66a33d5dc0575fc0d",
"index": 5012,
"step-1": "def odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\n<mask token>\n",
"step-2": "def odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\ndef pi_series():\n odd_nums = odds()\n approximation = 0\n while True:\n approximation += 4 / next(odd_nums)\n yield approximation\n approximation -= 4 / next(odd_nums)\n yield approximation\n\n\n<mask token>\n",
"step-3": "def odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\ndef pi_series():\n odd_nums = odds()\n approximation = 0\n while True:\n approximation += 4 / next(odd_nums)\n yield approximation\n approximation -= 4 / next(odd_nums)\n yield approximation\n\n\n<mask token>\nfor x in range(10000):\n print(next(approx_pi))\n",
"step-4": "def odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\ndef pi_series():\n odd_nums = odds()\n approximation = 0\n while True:\n approximation += 4 / next(odd_nums)\n yield approximation\n approximation -= 4 / next(odd_nums)\n yield approximation\n\n\napprox_pi = pi_series()\nfor x in range(10000):\n print(next(approx_pi))\n",
"step-5": "# Generates an infinite series of odd numbers\ndef odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\ndef pi_series():\n odd_nums = odds()\n approximation = 0\n while True:\n approximation += (4 / next(odd_nums))\n yield approximation\n approximation -= (4 / next(odd_nums))\n yield approximation\n\napprox_pi = pi_series()\n\n# The higher the range used here the closer to an acurate approximation of PI.\nfor x in range(10000):\n print(next(approx_pi))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from api.serializers.cart import CartSerializer
from api.serializers.product import ProductSerializer, ProductPopular
from api.serializers.type import TypeSerializer
from api.serializers.user import UserCreationSerializer, UserSerializer
from api.serializers.history import HistorySerializer
from api.serializers.order import OrderSerializer
from api.serializers.comment import CommentSerializer
from api.serializers.reply import ReplySerializer
from api.serializers.reason import ReasonSerializer
from api.serializers.waitinglist import WaitinglistSerializer
|
normal
|
{
"blob_id": "f0ff15a2392b439a54c5ec304192117c08978755",
"index": 4930,
"step-1": "<mask token>\n",
"step-2": "from api.serializers.cart import CartSerializer\nfrom api.serializers.product import ProductSerializer, ProductPopular\nfrom api.serializers.type import TypeSerializer\nfrom api.serializers.user import UserCreationSerializer, UserSerializer\nfrom api.serializers.history import HistorySerializer\nfrom api.serializers.order import OrderSerializer\nfrom api.serializers.comment import CommentSerializer\nfrom api.serializers.reply import ReplySerializer\nfrom api.serializers.reason import ReasonSerializer\nfrom api.serializers.waitinglist import WaitinglistSerializer\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# template for "Guess the number" mini-project
# input will come from buttons and an input field
# all output for the game will be printed in the console
import simplegui
import random
import math
# initialize global variables used in your code
range = 100
guesses_made = 0
guesses_remaining = 0
highest_guess = 0
lowest_guess = 0
correct_num = 0
victory_condition = False
# define event handlers for control panel
def range100():
"""Set the range of guessable numbers to [1,100) and restarts"""
global range, guesses_made, guesses_remaining, correct_num, victory_condition
range = 100
guesses_made = 0
guesses_remaining = 7 #calculate_remaining_guesses(range)
    correct_num = random.randrange(1, range)
victory_condition = False
print "New Game! Guess between 1 and ", range
print "Remaining guesses: ", guesses_remaining
def range1000():
"""Set the range of guessable numbers to [1,1000) and restarts"""
global range, guesses_made, guesses_remaining, correct_num, victory_condition
range = 1000
guesses_made = 0
guesses_remaining = 10#calculate_remaining_guesses(range)
    correct_num = random.randrange(1, range)
victory_condition = False
print "New Game! Guess between 1 and ", range
print "Remaining guesses: ", guesses_remaining
# main game logic goes here
def get_input(guess):
global guesses_made, guesses_remaining, victory_condition
guess = int(guess)
guesses_remaining -= 1
print "Your guess:" , guess
guesses_made += 1
if victory_condition == False:
if guess == correct_num:
print "Correct!"
print "You guessed the number in " , guesses_made , " guesses!"
victory_condition = True
if guesses_remaining > 0 and victory_condition == False:
if guess > correct_num:
print "Lower..."
print "Remaining guesses:" , guesses_remaining , "\n"
else:
print "Higher..."
print "Remaining guesses:" , guesses_remaining , "\n"
elif victory_condition == True:
print "You've won! Start a new game."
else:
print "You've run out of guesses. Game over!"
print "The correct number was: " , correct_num
else:
print "You've won! Start a new game.\n"
# create frame
frame = simplegui.create_frame("Guess the Number!", 400, 400, 300)
# register event handlers for control elements
frame.add_button("Range 1..100", range100, 100)
frame.add_button("Range 1..1000", range1000, 100)
frame.add_input("Enter your guess:", get_input, 100)
get_input(0)
# start frame
frame.start()
|
normal
|
{
"blob_id": "783326ccec31dc7a0ff46c5e4b69806e99aeda57",
"index": 9136,
"step-1": "# template for \"Guess the number\" mini-project\n# input will come from buttons and an input field\n# all output for the game will be printed in the console\nimport simplegui\nimport random\nimport math\n\n# initialize global variables used in your code\nrange = 100\nguesses_made = 0\nguesses_remaining = 0\nhighest_guess = 0\nlowest_guess = 0\ncorrect_num = 0\nvictory_condition = False\n\n# define event handlers for control panel\ndef range100():\n \"\"\"Set the range of guessable numbers to [1,100) and restarts\"\"\"\n global range, guesses_made, guesses_remaining, correct_num, victory_condition\n \n range = 100\n guesses_made = 0\n guesses_remaining = 7 #calculate_remaining_guesses(range)\n correct_num = random.randrange(range)\n victory_condition = False\n\n print \"New Game! Guess between 1 and \", range\n print \"Remaining guesses: \", guesses_remaining\n \ndef range1000():\n \"\"\"Set the range of guessable numbers to [1,1000) and restarts\"\"\"\n global range, guesses_made, guesses_remaining, correct_num, victory_condition\n\n range = 1000\n guesses_made = 0\n guesses_remaining = 10#calculate_remaining_guesses(range)\n correct_num = random.randrange(range)\n victory_condition = False\n\n print \"New Game! Guess between 1 and \", range\n print \"Remaining guesses: \", guesses_remaining\n \n# main game logic goes here \ndef get_input(guess):\n global guesses_made, guesses_remaining, victory_condition\n guess = int(guess)\n guesses_remaining -= 1\n \n print \"Your guess:\" , guess\n guesses_made += 1\n if victory_condition == False:\n if guess == correct_num:\n print \"Correct!\"\n print \"You guessed the number in \" , guesses_made , \" guesses!\"\n victory_condition = True\n \n if guesses_remaining > 0 and victory_condition == False:\n if guess > correct_num:\n print \"Lower...\"\n print \"Remaining guesses:\" , guesses_remaining , \"\\n\"\n else:\n print \"Higher...\"\n print \"Remaining guesses:\" , guesses_remaining , \"\\n\"\n elif victory_condition == True:\n print \"You've won! Start a new game.\"\n else:\n print \"You've run out of guesses. Game over!\"\n print \"The correct number was: \" , correct_num\n else:\n print \"You've won! Start a new game.\\n\"\n# create frame\nframe = simplegui.create_frame(\"Guess the Number!\", 400, 400, 300)\n\n# register event handlers for control elements\nframe.add_button(\"Range 1..100\", range100, 100)\nframe.add_button(\"Range 1..1000\", range1000, 100)\nframe.add_input(\"Enter your guess:\", get_input, 100)\nget_input(0)\n# start frame\nframe.start()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(response)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ec2 = boto3.resource('ec2')
response = ec2.meta.client.allocate_address(Domain='standard')
print(response)
<|reserved_special_token_1|>
import boto3
ec2 = boto3.resource('ec2')
response = ec2.meta.client.allocate_address(Domain='standard')
print(response)
|
flexible
|
{
"blob_id": "6424fccb7990b0a1722d5d787e7eb5acb4ff1a74",
"index": 1863,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(response)\n",
"step-3": "<mask token>\nec2 = boto3.resource('ec2')\nresponse = client.allocate_address(Domain='standard')\nprint(response)\n",
"step-4": "import boto3\nec2 = boto3.resource('ec2')\nresponse = client.allocate_address(Domain='standard')\nprint(response)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
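# Blog endpoints: superusers create, update, and delete posts; clients list and retrieve them by slug.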
class AddBlogs(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class ViewBlog(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
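# Event endpoints: superuser-managed; listing and slug-based retrieval are open to anyone.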
class AddEventView(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class ListEventView(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
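# Business partner endpoints: superuser-managed, publicly readable.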
class AddBusinessPartners(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class ViewBusinessPartner(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
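# Kid story endpoints: students submit stories under their own account; only published entries (status 'P') are publicly visible.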
class AddKidStory(generics.CreateAPIView):
permission_classes = IsStudent,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidStory(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact='P')
class KidStoryDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact='P')
class KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all stories (draft, published)
PATCH : superadmin can mark stories as published by changing status = P
Delete: superadmin can delete stories.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateKidsStorySerializer
queryset = KidStoryModel.objects.all()
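# Kid talent endpoints: students or clients submit entries; only published ones (status 'P') are shown publicly.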
class AddKidTalent(generics.CreateAPIView):
permission_classes = IsStudentORClient,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidTalent(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact='P')
class KidTalentDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact='P')
class KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all kids talent (draft, published)
PATCH : superadmin can mark kids talent as published by changing status = P
Delete: superadmin can delete kids talent.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateKidsTalentSerializer
queryset = KidTalentModel.objects.all()
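# Course endpoints: superuser-managed, publicly readable.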
class AddCourses(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class ViewCourse(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
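# Quiz context endpoints: superusers manage the records, clients read them.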
class AddQuizContext(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class ViewQuizContext(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
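# Client feedback endpoints: superusers manage the records, clients read them.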
class AddFeedback(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class ViewFeedback(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
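# Website ad endpoints: superuser-managed, publicly readable.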
class AddWebsiteAd(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class ViewWebsiteAd(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
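# Business promotion endpoints: superuser-managed, publicly readable.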
class AddBusinessPromotion(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class ViewBusinessPromotion(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
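# Team endpoints: superuser-managed, publicly readable.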
class AddTeam(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class ViewTeam(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
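# Advisory board endpoints: reads and writes are both restricted to superusers.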
class AddAdvisoryBoard(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class ViewAdvisoryBoard(generics.ListAPIView):
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
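# Announcement endpoints: superuser-managed, publicly readable.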
class AddAnnouncement(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class ListAnnouncement(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDetail(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class SuperadminProfileView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
user = get_user_from_token(request)
data = {'name': user.username, 'email': user.email}
return Response(data)
class AddJobClassified(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class ViewJobClassified(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class AddCustomerReviews(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ViewCustomerReview(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ClientComplain(APIView):
permission_classes = IsSuperUser,
serializer = ViewComplainSerializer(many=True)
class clientfeedback(APIView):
permission_classes = IsSuperUser,
def get(self, request, format=None):
feeds = ClientFeedBackModel.objects.filter(Class__admin=self.
request.user)
serializer = ClientFeedbackSerializer(feeds, many=True)
return Response(serializer.data)
class Enroll_Course(APIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = EnrollCourseSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class ViewEnroll_Course(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Enroll_CourseModel.objects.filter(course=course, client=client)
serializer = ViewEnrollCourseSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class DetailEnroll_CourseView(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return Enroll_CourseModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = ViewEnrollCourseSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = ViewEnrollCourseSerializer(data, data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class CourseDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, slug):
try:
return CourseModel.objects.get(slug=slug)
except CourseModel.DoesNotExist:
raise Http404
def get(self, request, slug, format=None):
data = self.get_object(slug)
if data.classes.school.admin == self.request.user:
serializer = ViewCourseSerializer(data)
return Response(serializer.data)
else:
return Response({'message':
'This course does not belong to your school'}, status=
status.HTTP_400_BAD_REQUEST)
def put(self, request, slug, format=None):
data = self.get_object(slug)
if data.course.client.admin == self.request.user:
serializer = CourseSerializer(data, data=request.data)
if serializer.is_valid(raise_exception=True):
course = serializer.validated_data.get('course', '')
if course.client.admin == self.request.user:
serializer.save()
return Response(serializer.data, status=status.
HTTP_201_CREATED)
return Response({'message':
'This Class does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
else:
return Response({'message':
'This course does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, slug, format=None):
data = self.get_object(slug)
if data.course.client.admin == self.request.user:
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response({'message':
'This course does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
class SchoolRegistrationView(RegisterView):
serializer_class = RegisterSchoolSerializer
permission_classes = IsSuperUser,
class Add_question(generics.CreateAPIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = QuestionSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class Viewquestion(generics.ListAPIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
data = QuestionModel.objects.filter(course_id=course)
serializer = QuestionSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class QuestionDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return QuestionModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = QuestionSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = QuestionSerializer(data, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedQuestionView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
admin = self.request.user
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Client_SubmitquestionModel.objects.filter(course__course=
course, client__client=client)
serializer = Client_submittedquestionSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class AddonlineTest(generics.CreateAPIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = testSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class ViewOnlinetest(generics.ListAPIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
data = Client_testModel.objects.filter(course_id=course)
serializer = testSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class onlinetestDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return Client_testModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = testSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = testSerializer(data, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedonlineTestView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
admin = self.request.user
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Client_SubmittestModel.objects.filter(course__course=course,
client__client=client)
serializer = Client_submittedtestSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SchoolDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class SchoolDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class AddBlogs(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class ViewBlog(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class AddEventView(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class ListEventView(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class AddBusinessPartners(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class ViewBusinessPartner(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class AddKidStory(generics.CreateAPIView):
permission_classes = IsStudent,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidStory(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact='P')
class KidStoryDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact='P')
class KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all stories (draft, published)
PATCH : superadmin can mark stories as published by changing status = P
Delete: superadmin can delete stories.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateKidsStorySerializer
queryset = KidStoryModel.objects.all()
class AddKidTalent(generics.CreateAPIView):
permission_classes = IsStudentORClient,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidTalent(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact='P')
class KidTalentDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact='P')
class KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all kids talent (draft, published)
PATCH : superadmin can mark kids talent as published by changing status = P
Delete: superadmin can delete kids talent.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateKidsTalentSerializer
queryset = KidTalentModel.objects.all()
class AddCourses(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class ViewCourse(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class AddQuizContext(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class ViewQuizContext(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class AddFeedback(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class ViewFeedback(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class AddWebsiteAd(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class ViewWebsiteAd(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class AddBusinessPromotion(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class ViewBusinessPromotion(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class AddTeam(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class ViewTeam(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class AddAdvisoryBoard(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class ViewAdvisoryBoard(generics.ListAPIView):
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AddAnnouncement(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class ListAnnouncement(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDetail(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class SuperadminProfileView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
user = get_user_from_token(request)
data = {'name': user.username, 'email': user.email}
return Response(data)
class AddJobClassified(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class ViewJobClassified(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class AddCustomerReviews(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ViewCustomerReview(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ClientComplain(APIView):
permission_classes = IsSuperUser,
serializer = ViewComplainSerializer(many=True)
class clientfeedback(APIView):
permission_classes = IsSuperUser,
def get(self, request, format=None):
feeds = ClientFeedBackModel.objects.filter(Class__admin=self.
request.user)
serializer = ClientFeedbackSerializer(feeds, many=True)
return Response(serializer.data)
class Enroll_Course(APIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = EnrollCourseSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class ViewEnroll_Course(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Enroll_CourseModel.objects.filter(course=course, client=client)
serializer = ViewEnrollCourseSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class DetailEnroll_CourseView(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return Enroll_CourseModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = ViewEnrollCourseSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = ViewEnrollCourseSerializer(data, data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class CourseDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, slug):
try:
return CourseModel.objects.get(slug=slug)
except CourseModel.DoesNotExist:
raise Http404
def get(self, request, slug, format=None):
data = self.get_object(slug)
if data.classes.school.admin == self.request.user:
serializer = ViewCourseSerializer(data)
return Response(serializer.data)
else:
return Response({'message':
'This course does not belong to your school'}, status=
status.HTTP_400_BAD_REQUEST)
def put(self, request, slug, format=None):
data = self.get_object(slug)
if data.course.client.admin == self.request.user:
serializer = CourseSerializer(data, data=request.data)
if serializer.is_valid(raise_exception=True):
course = serializer.validated_data.get('course', '')
if course.client.admin == self.request.user:
serializer.save()
return Response(serializer.data, status=status.
HTTP_201_CREATED)
return Response({'message':
'This Class does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
else:
return Response({'message':
'This course does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, slug, format=None):
data = self.get_object(slug)
if data.course.client.admin == self.request.user:
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response({'message':
'This course does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
class SchoolRegistrationView(RegisterView):
serializer_class = RegisterSchoolSerializer
permission_classes = IsSuperUser,
class Add_question(generics.CreateAPIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = QuestionSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class Viewquestion(generics.ListAPIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
data = QuestionModel.objects.filter(course_id=course)
serializer = QuestionSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class QuestionDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return QuestionModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = QuestionSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = QuestionSerializer(data, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedQuestionView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
admin = self.request.user
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Client_SubmitquestionModel.objects.filter(course__course=
course, client__client=client)
serializer = Client_submittedquestionSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class AddonlineTest(generics.CreateAPIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = testSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class ViewOnlinetest(generics.ListAPIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
data = Client_testModel.objects.filter(course_id=course)
serializer = testSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class onlinetestDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return Client_testModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = testSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = testSerializer(data, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedonlineTestView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
admin = self.request.user
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Client_SubmittestModel.objects.filter(course__course=course,
client__client=client)
serializer = Client_submittedtestSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AddSchools(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class ViewSchool(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class SchoolDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class SchoolDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class AddBlogs(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class ViewBlog(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class AddEventView(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class ListEventView(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class AddBusinessPartners(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class ViewBusinessPartner(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class AddKidStory(generics.CreateAPIView):
permission_classes = IsStudent,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidStory(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact='P')
class KidStoryDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact='P')
class KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all stories (draft, published)
PATCH : superadmin can mark stories as published by changing status = P
Delete: superadmin can delete stories.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateKidsStorySerializer
queryset = KidStoryModel.objects.all()
class AddKidTalent(generics.CreateAPIView):
permission_classes = IsStudentORClient,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidTalent(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact='P')
class KidTalentDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact='P')
class KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all kids talent (draft, published)
PATCH : superadmin can mark kids talent as published by changing status = P
Delete: superadmin can delete kids talent.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateKidsTalentSerializer
queryset = KidTalentModel.objects.all()
class AddCourses(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class ViewCourse(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class AddQuizContext(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class ViewQuizContext(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class AddFeedback(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class ViewFeedback(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class AddWebsiteAd(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class ViewWebsiteAd(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class AddBusinessPromotion(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class ViewBusinessPromotion(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class AddTeam(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class ViewTeam(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class AddAdvisoryBoard(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class ViewAdvisoryBoard(generics.ListAPIView):
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AddAnnouncement(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class ListAnnouncement(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDetail(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class SuperadminProfileView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
user = get_user_from_token(request)
data = {'name': user.username, 'email': user.email}
return Response(data)
class AddJobClassified(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class ViewJobClassified(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class AddCustomerReviews(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ViewCustomerReview(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ClientComplain(APIView):
permission_classes = IsSuperUser,
serializer = ViewComplainSerializer(many=True)
class clientfeedback(APIView):
permission_classes = IsSuperUser,
def get(self, request, format=None):
feeds = ClientFeedBackModel.objects.filter(Class__admin=self.
request.user)
serializer = ClientFeedbackSerializer(feeds, many=True)
return Response(serializer.data)
class Enroll_Course(APIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = EnrollCourseSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class ViewEnroll_Course(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Enroll_CourseModel.objects.filter(course=course, client=client)
serializer = ViewEnrollCourseSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class DetailEnroll_CourseView(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return Enroll_CourseModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = ViewEnrollCourseSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = ViewEnrollCourseSerializer(data, data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class CourseDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, slug):
try:
return CourseModel.objects.get(slug=slug)
except CourseModel.DoesNotExist:
raise Http404
def get(self, request, slug, format=None):
data = self.get_object(slug)
if data.classes.school.admin == self.request.user:
serializer = ViewCourseSerializer(data)
return Response(serializer.data)
else:
return Response({'message':
'This course does not belong to your school'}, status=
status.HTTP_400_BAD_REQUEST)
def put(self, request, slug, format=None):
data = self.get_object(slug)
if data.course.client.admin == self.request.user:
serializer = CourseSerializer(data, data=request.data)
if serializer.is_valid(raise_exception=True):
course = serializer.validated_data.get('course', '')
if course.client.admin == self.request.user:
serializer.save()
return Response(serializer.data, status=status.
HTTP_201_CREATED)
return Response({'message':
'This Class does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
else:
return Response({'message':
'This course does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, slug, format=None):
data = self.get_object(slug)
if data.course.client.admin == self.request.user:
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response({'message':
'This course does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
class SchoolRegistrationView(RegisterView):
serializer_class = RegisterSchoolSerializer
permission_classes = IsSuperUser,
class Add_question(generics.CreateAPIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = QuestionSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class Viewquestion(generics.ListAPIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
data = QuestionModel.objects.filter(course_id=course)
serializer = QuestionSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class QuestionDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return QuestionModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = QuestionSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = QuestionSerializer(data, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedQuestionView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
admin = self.request.user
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Client_SubmitquestionModel.objects.filter(course__course=
course, client__client=client)
serializer = Client_submittedquestionSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class AddonlineTest(generics.CreateAPIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = testSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class ViewOnlinetest(generics.ListAPIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
data = Client_testModel.objects.filter(course_id=course)
serializer = testSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class onlinetestDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return Client_testModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = testSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = testSerializer(data, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedonlineTestView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
admin = self.request.user
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Client_SubmittestModel.objects.filter(course__course=course,
client__client=client)
serializer = Client_submittedtestSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AddArticleView(generics.CreateAPIView):
<mask token>
<mask token>
<mask token>
<mask token>
class ListArticleView(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = ArticleSerializer
queryset = ArticleModel.objects.filter(status__exact='P')
class ArticleDetail(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = ArticleSerializer
queryset = ArticleModel.objects.filter(status__exact='P')
class ArticleDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all articles (draft, published)
PATCH : superadmin can mark article as published by changing status = P
Delete: superadmin can delete article.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateArticleSerializer
queryset = ArticleModel.objects.all()
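# A minimal usage sketch of the moderation flow documented above, kept
# commented out so it never runs at import time. The route
# '/articles/<slug>/moderate/' is a hypothetical example, not taken from
# this project's urls.py; only the IsSuperUser requirement and the
# status = 'P' convention come from this module.
#
# from rest_framework.test import APIClient
#
# admin = APIClient()
# admin.force_authenticate(user=superadmin_user)  # any user passing IsSuperUser
# resp = admin.patch('/articles/my-article/moderate/', {'status': 'P'})
# assert resp.status_code == 200  # the article now appears in ListArticleView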
class AddQuestions(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class ViewQuestion(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class QuestionDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class QuestionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class AddSchools(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class ViewSchool(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class SchoolDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class SchoolDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class AddBlogs(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class ViewBlog(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class AddEventView(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class ListEventView(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class AddBusinessPartners(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class ViewBusinessPartner(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class AddKidStory(generics.CreateAPIView):
permission_classes = IsStudent,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidStory(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact='P')
class KidStoryDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact='P')
class KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all stories (draft, published)
PATCH : superadmin can mark stories as published by changing status = P
Delete: superadmin can delete stories.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateKidsStorySerializer
queryset = KidStoryModel.objects.all()
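# Sketch of the review workflow for stories, commented out by design. Because
# the public story views above filter on status='P', a submission stays hidden
# until a superadmin publishes it here. The URL below is illustrative only.
#
# from rest_framework.test import APIClient
#
# admin = APIClient()
# admin.force_authenticate(user=superadmin_user)
# admin.patch('/kid-stories/some-slug/review/', {'status': 'P'})   # publish
# admin.delete('/kid-stories/another-slug/review/')                # remove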
class AddKidTalent(generics.CreateAPIView):
permission_classes = IsStudentORClient,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidTalent(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact='P')
class KidTalentDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact='P')
class KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
"""
Get: superadmin can see all kids talent (draft, published)
PATCH : superadmin can mark kids talent as published by changing status = P
Delete: superadmin can delete kids talent.
"""
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = UpdateKidsTalentSerializer
queryset = KidTalentModel.objects.all()
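# Sketch of the submission side, assuming the AddKidTalent route above. The
# path and field names are illustrative; what this module does guarantee is
# that perform_create() stamps the authenticated user, so the client never
# sends a 'user' field, and that new entries stay hidden from ViewKidTalent
# until a superadmin sets status='P' via KidTalentDeleteUpdateView.
#
# from rest_framework.test import APIClient
#
# kid = APIClient()
# kid.force_authenticate(user=student_user)  # must pass IsStudentORClient
# kid.post('/kid-talents/', {'title': 'My magic trick', 'description': '...'})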
class AddCourses(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class ViewCourse(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class AddQuizContext(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class ViewQuizContext(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class AddFeedback(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class ViewFeedback(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class AddWebsiteAd(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class ViewWebsiteAd(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class AddBusinessPromotion(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class ViewBusinessPromotion(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class AddTeam(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class ViewTeam(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class AddAdvisoryBoard(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class ViewAdvisoryBoard(generics.ListAPIView):
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AddAnnouncement(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class ListAnnouncement(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDetail(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class SuperadminProfileView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
user = get_user_from_token(request)
data = {'name': user.username, 'email': user.email}
return Response(data)
class AddJobClassified(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class ViewJobClassified(generics.ListAPIView):
permission_classes = AllowAny,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = AllowAny,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class AddCustomerReviews(generics.CreateAPIView):
permission_classes = IsSuperUser,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ViewCustomerReview(generics.ListAPIView):
permission_classes = IsClient,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = IsClient,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = IsSuperUser,
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ClientComplain(APIView):
permission_classes = IsSuperUser,
serializer = ViewComplainSerializer(many=True)
class clientfeedback(APIView):
permission_classes = IsSuperUser,
def get(self, request, format=None):
feeds = ClientFeedBackModel.objects.filter(Class__admin=self.
request.user)
serializer = ClientFeedbackSerializer(feeds, many=True)
return Response(serializer.data)
class Enroll_Course(APIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = EnrollCourseSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class ViewEnroll_Course(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Enroll_CourseModel.objects.filter(course=course, client=client)
serializer = ViewEnrollCourseSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class DetailEnroll_CourseView(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return Enroll_CourseModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = ViewEnrollCourseSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = ViewEnrollCourseSerializer(data, data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class CourseDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, slug):
try:
return CourseModel.objects.get(slug=slug)
except CourseModel.DoesNotExist:
raise Http404
def get(self, request, slug, format=None):
data = self.get_object(slug)
if data.classes.school.admin == self.request.user:
serializer = ViewCourseSerializer(data)
return Response(serializer.data)
else:
return Response({'message':
'This course does not belong to your school'}, status=
status.HTTP_400_BAD_REQUEST)
def put(self, request, slug, format=None):
data = self.get_object(slug)
if data.course.client.admin == self.request.user:
serializer = CourseSerializer(data, data=request.data)
if serializer.is_valid(raise_exception=True):
course = serializer.validated_data.get('course', '')
if course.client.admin == self.request.user:
serializer.save()
return Response(serializer.data, status=status.
HTTP_201_CREATED)
return Response({'message':
'This Class does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
else:
return Response({'message':
'This course does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, slug, format=None):
data = self.get_object(slug)
if data.course.client.admin == self.request.user:
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response({'message':
'This course does not belong to you'}, status=status.
HTTP_400_BAD_REQUEST)
class SchoolRegistrationView(RegisterView):
serializer_class = RegisterSchoolSerializer
permission_classes = IsSuperUser,
class Add_question(generics.CreateAPIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = QuestionSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class Viewquestion(generics.ListAPIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
data = QuestionModel.objects.filter(course_id=course)
serializer = QuestionSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class QuestionDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return QuestionModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = QuestionSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = QuestionSerializer(data, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedQuestionView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
admin = self.request.user
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Client_SubmitquestionModel.objects.filter(course__course=
course, client__client=client)
serializer = Client_submittedquestionSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class AddonlineTest(generics.CreateAPIView):
permission_classes = IsSuperUser,
def post(self, request, format=None):
serializer = testSerializer(data=request.data)
print(serializer)
if serializer.is_valid():
course = serializer.validated_data.get('course', '')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
class ViewOnlinetest(generics.ListAPIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
course = self.kwargs['course_id']
data = Client_testModel.objects.filter(course_id=course)
serializer = testSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class onlinetestDetail(APIView):
permission_classes = IsSuperUser,
def get_object(self, pk):
try:
return Client_testModel.objects.get(id=pk)
except:
raise Http404
def get(self, request, pk, format=None):
data = self.get_object(pk)
serializer = testSerializer(data)
return Response(serializer.data)
def put(self, request, pk, format=None):
data = self.get_object(pk)
serializer = testSerializer(data, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.
HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
data = self.get_object(pk)
data.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedonlineTestView(APIView):
permission_classes = IsSuperUser,
def get(self, request, *args, **kwargs):
admin = self.request.user
course = self.kwargs['course_id']
client = self.kwargs['client_id']
data = Client_SubmittestModel.objects.filter(course__course=course,
client__client=client)
serializer = Client_submittedtestSerializer(data, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
from django.shortcuts import render
# ***************** API ****************
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser, FileUploadParser, MultiPartParser, FormParser
from .models import *
from django.http import Http404
from .serializers import *
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, viewsets, permissions
from rest_framework import generics
from rest_framework.permissions import AllowAny, IsAuthenticated
from django.contrib.auth import get_user_model
from client.models import ClientModel
from adminapp.models import SchoolModel
from adminapp.serializers import SchoolSerializer
from .custompermissions import *
from client.permissions import *
from rest_framework.authentication import SessionAuthentication
from Student.permissions import IsStudent
# Assumed import: RegisterView is referenced by SchoolRegistrationView below
# but is not imported anywhere else in this module; dj_rest_auth's
# registration view is the usual source for this pattern.
from dj_rest_auth.registration.views import RegisterView

User = get_user_model()
def get_user_from_token(request):
    # Resolve the authenticated user from the request's auth token.
    # Under DRF TokenAuthentication this is equivalent to request.user;
    # it is kept as an explicit helper because several views call it.
    token = request.user.auth_token  # Token instance tied to the current user
    user = User.objects.get(id=token.user_id)
    return user
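
# The custom permission classes imported above (IsSuperUser, IsClient,
# IsStudent, IsStudentORClient) are defined in .custompermissions,
# client.permissions and Student.permissions. A minimal sketch of what
# they presumably check — assuming role flags on the user model; the
# real definitions may differ:
#
#     from rest_framework.permissions import BasePermission
#
#     class IsSuperUser(BasePermission):
#         def has_permission(self, request, view):
#             return bool(request.user and request.user.is_superuser)
#
#     class IsClient(BasePermission):
#         def has_permission(self, request, view):
#             return bool(request.user and getattr(request.user, 'is_client', False))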
# Create your views here.
# class UserListView(generics.ListAPIView):
# parser_classes = (MultiPartParser,FormParser)
# queryset = UserModel.objects.all()
# serializer_class = UserSerializer
# class UserDetailView(generics.RetrieveAPIView):
# parser_classes = (MultiPartParser,FormParser)
# queryset = UserModel.objects.all()
# serializer_class = UserSerializer
class AddArticleView(generics.CreateAPIView):
#All authenticated users can add articles
permission_classes = (IsAuthenticated, )
serializer_class = ArticleSerializer
queryset = ArticleModel.objects.all()
    def perform_create(self, serializer):
        # DRF hook for injecting server-side values at save time: the
        # article's author is always the authenticated request user, never
        # a client-supplied payload field.
        serializer.save(user=self.request.user)
class ListArticleView(generics.ListAPIView):
#Anyone can see the published Articles
permission_classes = (AllowAny, )
serializer_class = ArticleSerializer
queryset = ArticleModel.objects.filter(status__exact="P")
class ArticleDetail(generics.RetrieveAPIView):
#anyone can see detail of published article
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = ArticleSerializer
queryset = ArticleModel.objects.filter(status__exact="P")
class ArticleDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
    '''
    GET: superadmin can see all articles (draft and published).
    PATCH: superadmin can publish an article by setting status = 'P'.
    DELETE: superadmin can delete articles.
    '''
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = UpdateArticleSerializer
queryset = ArticleModel.objects.all()
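
# A minimal sketch of the publish workflow described in the docstring
# above, assuming a hypothetical route /articles/<slug>/ and a superuser
# test fixture; APIClient is DRF's test client:
#
#     from rest_framework.test import APIClient
#
#     client = APIClient()
#     client.force_authenticate(user=superadmin)  # superadmin: a superuser instance
#     resp = client.patch('/articles/my-article/', {'status': 'P'}, format='json')
#     assert resp.status_code == 200  # article now appears in ListArticleView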
class AddQuestions(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class ViewQuestion(generics.ListAPIView):
permission_classes = (IsClient, )
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class QuestionDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (IsClient, )
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class QuestionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class AddSchools(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class ViewSchool(generics.ListAPIView):
permission_classes = (IsClient, )
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class SchoolDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (IsClient, )
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class SchoolDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = SchoolSerializer
queryset = SchoolModel.objects.all()
class AddBlogs(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class ViewBlog(generics.ListAPIView):
permission_classes = (IsClient, )
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (IsClient, )
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = BlogSerializer
queryset = BlogModel.objects.all()
class AddEventView(generics.CreateAPIView):
#only super user can add events
permission_classes = (IsSuperUser, )
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class ListEventView(generics.ListAPIView):
#Anyone can see the events
permission_classes = (AllowAny, )
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDetailView(generics.RetrieveAPIView):
#Anyone can see the detail of events
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
#only superadmin can delete and update events
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = EventSerializer
queryset = EventModel.objects.all()
class AddBusinessPartners(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class ViewBusinessPartner(generics.ListAPIView):
permission_classes = (AllowAny, )
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = BusinessPartnersSerializer
queryset = BusinessPartnersModel.objects.all()
class AddKidStory(generics.CreateAPIView):
#Students can add kidstory
permission_classes = (IsStudent, )
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidStory(generics.ListAPIView):
# anyone can see published kids story
permission_classes = (AllowAny, )
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact="P")
class KidStoryDetailView(generics.RetrieveAPIView):
#anyone can see detail of published kids story
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = KidStorySerializer
queryset = KidStoryModel.objects.filter(status__exact="P")
class KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    '''
    GET: superadmin can see all stories (draft and published).
    PATCH: superadmin can publish a story by setting status = 'P'.
    DELETE: superadmin can delete stories.
    '''
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = UpdateKidsStorySerializer
queryset = KidStoryModel.objects.all()
class AddKidTalent(generics.CreateAPIView):
#Students or client can add KidsTalent
permission_classes = (IsStudentORClient, )
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.all()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewKidTalent(generics.ListAPIView):
# anyone can see published kids talent
permission_classes = (AllowAny, )
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact="P")
class KidTalentDetailView(generics.RetrieveAPIView):
#anyone can see detail of published kids talent
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = KidTalentSerializer
queryset = KidTalentModel.objects.filter(status__exact="P")
class KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    '''
    GET: superadmin can see all kids talent entries (draft and published).
    PATCH: superadmin can publish an entry by setting status = 'P'.
    DELETE: superadmin can delete kids talent entries.
    '''
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = UpdateKidsTalentSerializer
queryset = KidTalentModel.objects.all()
class AddCourses(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class ViewCourse(generics.ListAPIView):
permission_classes = (AllowAny, )
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = CourseSerializer
queryset = CourseModel.objects.all()
class AddQuizContext(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class ViewQuizContext(generics.ListAPIView):
permission_classes = (IsClient, )
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (IsClient, )
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = QuizContextSerializer
queryset = QuizContextModel.objects.all()
class AddFeedback(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class ViewFeedback(generics.ListAPIView):
permission_classes = (IsClient, )
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (IsClient, )
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = ClientFeedbackSerializer
queryset = ClientFeedBackModel.objects.all()
class AddWebsiteAd(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class ViewWebsiteAd(generics.ListAPIView):
permission_classes = (AllowAny, )
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
class WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = WebsiteAdSerializer
queryset = WebsiteAdModel.objects.all()
# class AddApproval(generics.CreateAPIView):
# permission_classes = (IsSuperUser, )
# serializer_class = ApprovalSerializer
# queryset = ApprovalModel.objects.all()
# class ViewApproval(generics.ListAPIView):
# permission_classes = (IsClient, )
# serializer_class = ApprovalSerializer
# queryset = ApprovalModel.objects.all()
# class ApprovalDetailView(generics.RetrieveAPIView):
# lookup_field = 'slug'
# permission_classes = (IsClient, )
# serializer_class = ApprovalSerializer
# queryset = ApprovalModel.objects.all()
# class ApprovalDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
# lookup_field = 'slug'
# permission_classes = (IsSuperUser, )
# serializer_class = ApprovalSerializer
# queryset = ApprovalModel.objects.all()
class AddBusinessPromotion(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class ViewBusinessPromotion(generics.ListAPIView):
permission_classes = (AllowAny, )
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = BusinessPromotionSerializer
queryset = BusinessPromotionModel.objects.all()
class AddTeam(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class ViewTeam(generics.ListAPIView):
permission_classes = (AllowAny, )
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = TeamSerializer
queryset = TeamModel.objects.all()
class AddAdvisoryBoard(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class ViewAdvisoryBoard(generics.ListAPIView):
permission_classes = (IsSuperUser, )
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = AdvisoryBoardSerializer
queryset = AdvisoryBoardModel.objects.all()
class AddAnnouncement(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class ListAnnouncement(generics.ListAPIView):
permission_classes = (AllowAny, )
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDetail(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = AnnouncementSerializer
queryset = AnnouncementModel.objects.all()
class SuperadminProfileView(APIView):
permission_classes = (IsSuperUser, )
def get(self, request, *args, **kwargs):
user = get_user_from_token(request)
data = {
'name': user.username,
'email': user.email
}
return Response(data)
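
# Example call against the profile endpoint above, assuming DRF
# TokenAuthentication and a hypothetical route /superadmin/profile/:
#
#     import requests
#
#     resp = requests.get(
#         'http://localhost:8000/superadmin/profile/',
#         headers={'Authorization': 'Token <superadmin-token>'},
#     )
#     # -> {"name": "<username>", "email": "<email>"}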
class AddJobClassified(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class ViewJobClassified(generics.ListAPIView):
permission_classes = (AllowAny, )
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (AllowAny, )
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = JobClassifiedSerializer
queryset = JobClassifiedModel.objects.all()
class AddCustomerReviews(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ViewCustomerReview(generics.ListAPIView):
permission_classes = (IsClient, )
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (IsClient, )
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = CustomerReviewSerializer
queryset = CustomerReviewModel.objects.all()
class ClientComplain(APIView):
permission_classes = (IsSuperUser, )
serializer = ViewComplainSerializer(many=True)
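
    # Sketch of a list handler for this view, assuming a ComplainModel
    # (hypothetical name) backs ViewComplainSerializer:
    #
    #     def get(self, request, format=None):
    #         complains = ComplainModel.objects.all()
    #         serializer = ViewComplainSerializer(complains, many=True)
    #         return Response(serializer.data)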
class clientfeedback(APIView):
    permission_classes = (IsSuperUser, )

    def get(self, request, format=None):
        feeds = ClientFeedBackModel.objects.filter(
            Class__admin=self.request.user
        )
        serializer = ClientFeedbackSerializer(feeds, many=True)
        return Response(serializer.data)
class Enroll_Course(APIView):
    permission_classes = (IsSuperUser, )

    def post(self, request, format=None):
        serializer = EnrollCourseSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ViewEnroll_Course(APIView):
    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        course = self.kwargs['course_id']
        client = self.kwargs['client_id']
        data = Enroll_CourseModel.objects.filter(course=course, client=client)
        serializer = ViewEnrollCourseSerializer(data, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class DetailEnroll_CourseView(APIView):
    permission_classes = (IsSuperUser, )

    def get_object(self, pk):
        try:
            return Enroll_CourseModel.objects.get(id=pk)
        except Enroll_CourseModel.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        data = self.get_object(pk)
        serializer = ViewEnrollCourseSerializer(data)
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        data = self.get_object(pk)
        serializer = ViewEnrollCourseSerializer(data, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        data = self.get_object(pk)
        data.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class CourseDetail(APIView):
    permission_classes = (IsSuperUser, )

    def get_object(self, slug):
        try:
            return CourseModel.objects.get(slug=slug)
        except CourseModel.DoesNotExist:
            raise Http404

    def get(self, request, slug, format=None):
        # Ownership check: the school admin behind the course's class must
        # be the requesting user.
        data = self.get_object(slug)
        if data.classes.school.admin == self.request.user:
            serializer = ViewCourseSerializer(data)
            return Response(serializer.data)
        else:
            return Response(
                {'message': 'This course does not belong to your school'},
                status=status.HTTP_400_BAD_REQUEST
            )

    def put(self, request, slug, format=None):
        data = self.get_object(slug)
        if data.course.client.admin == self.request.user:
            serializer = CourseSerializer(data, data=request.data)
            if serializer.is_valid():
                course = serializer.validated_data.get('course', '')
                if course.client.admin == self.request.user:
                    serializer.save()
                    return Response(serializer.data, status=status.HTTP_201_CREATED)
                return Response(
                    {'message': 'This Class does not belong to you'},
                    status=status.HTTP_400_BAD_REQUEST
                )
            else:
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response(
                {'message': 'This course does not belong to you'},
                status=status.HTTP_400_BAD_REQUEST
            )

    def delete(self, request, slug, format=None):
        data = self.get_object(slug)
        if data.course.client.admin == self.request.user:
            data.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            return Response(
                {'message': 'This course does not belong to you'},
                status=status.HTTP_400_BAD_REQUEST
            )
class SchoolRegistrationView(RegisterView):
serializer_class = RegisterSchoolSerializer
permission_classes = (IsSuperUser,)
class Add_question(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )

    def post(self, request, format=None):
        serializer = QuestionSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Viewquestion(generics.ListAPIView):
    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        course = self.kwargs['course_id']
        data = QuestionModel.objects.filter(course_id=course)
        serializer = QuestionSerializer(data, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class QuestionDetail(APIView):
    permission_classes = (IsSuperUser, )

    def get_object(self, pk):
        try:
            return QuestionModel.objects.get(id=pk)
        except QuestionModel.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        data = self.get_object(pk)
        serializer = QuestionSerializer(data)
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        data = self.get_object(pk)
        serializer = QuestionSerializer(data, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        data = self.get_object(pk)
        data.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedQuestionView(APIView):
    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        course = self.kwargs['course_id']
        client = self.kwargs['client_id']
        # Double-underscore lookups traverse the FK relations: submissions
        # whose course row points at this course id and whose client row
        # points at this client id.
        data = Client_SubmitquestionModel.objects.filter(
            course__course=course,
            client__client=client
        )
        serializer = Client_submittedquestionSerializer(data, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class AddonlineTest(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )

    def post(self, request, format=None):
        serializer = testSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ViewOnlinetest(generics.ListAPIView):
    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        course = self.kwargs['course_id']
        data = Client_testModel.objects.filter(course_id=course)
        serializer = testSerializer(data, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class onlinetestDetail(APIView):
    permission_classes = (IsSuperUser, )

    def get_object(self, pk):
        try:
            return Client_testModel.objects.get(id=pk)
        except Client_testModel.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        data = self.get_object(pk)
        serializer = testSerializer(data)
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        data = self.get_object(pk)
        serializer = testSerializer(data, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        data = self.get_object(pk)
        data.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedonlineTestView(APIView):
    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        course = self.kwargs['course_id']
        client = self.kwargs['client_id']
        data = Client_SubmittestModel.objects.filter(
            course__course=course,
            client__client=client
        )
        serializer = Client_submittedtestSerializer(data, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
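
# A sketch of how these views might be wired up in urls.py — the route
# names and prefixes below are hypothetical; only the lookup kwargs
# (slug, pk, course_id, client_id) are dictated by the views themselves:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('articles/<slug:slug>/', views.ArticleDeleteUpdate.as_view()),
#         path('enroll/<int:pk>/', views.DetailEnroll_CourseView.as_view()),
#         path('questions/<int:course_id>/', views.Viewquestion.as_view()),
#         path('submissions/<int:course_id>/<int:client_id>/',
#              views.SubmittedQuestionView.as_view()),
#     ]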
AddWebsiteAd(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass ViewWebsiteAd(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass WebsiteAdDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass AddBusinessPromotion(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass ViewBusinessPromotion(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass BusinessPromotionDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass AddTeam(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass ViewTeam(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass TeamDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass AddAdvisoryBoard(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass ViewAdvisoryBoard(generics.ListAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AdvisoryBoardDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AddAnnouncement(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass ListAnnouncement(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass AnnouncementDetail(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n 
serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass SuperadminProfileView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n user = get_user_from_token(request)\n data = {'name': user.username, 'email': user.email}\n return Response(data)\n\n\nclass AddJobClassified(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass ViewJobClassified(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass JobClassifiedDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass AddCustomerReviews(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass ViewCustomerReview(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass CustomerReviewDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass ClientComplain(APIView):\n permission_classes = IsSuperUser,\n serializer = ViewComplainSerializer(many=True)\n\n\nclass clientfeedback(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, format=None):\n feeds = ClientFeedBackModel.objects.filter(Class__admin=self.\n request.user)\n serializer = ClientFeedbackSerializer(feeds, many=True)\n return Response(serializer.data)\n\n\nclass Enroll_Course(APIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = EnrollCourseSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass ViewEnroll_Course(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Enroll_CourseModel.objects.filter(course=course, client=client)\n serializer = ViewEnrollCourseSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass DetailEnroll_CourseView(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return 
Enroll_CourseModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = ViewEnrollCourseSerializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = ViewEnrollCourseSerializer(data, data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CourseDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, slug):\n try:\n return CourseModel.objects.get(slug=slug)\n except CourseModel.DoesNotExist:\n raise Http404\n\n def get(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.classes.school.admin == self.request.user:\n serializer = ViewCourseSerializer(data)\n return Response(serializer.data)\n else:\n return Response({'message':\n 'This course does not belong to your school'}, status=\n status.HTTP_400_BAD_REQUEST)\n\n def put(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.course.client.admin == self.request.user:\n serializer = CourseSerializer(data, data=request.data)\n if serializer.is_valid(raise_exception=True):\n course = serializer.validated_data.get('course', '')\n if course.client.admin == self.request.user:\n serializer.save()\n return Response(serializer.data, status=status.\n HTTP_201_CREATED)\n return Response({'message':\n 'This Class does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n else:\n return Response({'message':\n 'This course does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.course.client.admin == self.request.user:\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response({'message':\n 'This course does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass SchoolRegistrationView(RegisterView):\n serializer_class = RegisterSchoolSerializer\n permission_classes = IsSuperUser,\n\n\nclass Add_question(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = QuestionSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass Viewquestion(generics.ListAPIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n data = QuestionModel.objects.filter(course_id=course)\n serializer = QuestionSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass QuestionDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return QuestionModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = QuestionSerializer(data)\n return 
Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = QuestionSerializer(data, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SubmittedQuestionView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n admin = self.request.user\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Client_SubmitquestionModel.objects.filter(course__course=\n course, client__client=client)\n serializer = Client_submittedquestionSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass AddonlineTest(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = testSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass ViewOnlinetest(generics.ListAPIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n data = Client_testModel.objects.filter(course_id=course)\n serializer = testSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass onlinetestDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return Client_testModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = testSerializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = testSerializer(data, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SubmittedonlineTestView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n admin = self.request.user\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Client_SubmittestModel.objects.filter(course__course=course,\n client__client=client)\n serializer = Client_submittedtestSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n",
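Every view in this record gates access through custom permission classes (IsSuperUser, IsClient, IsStudent, IsStudentORClient) that are referenced but never defined in any step. DRF ships IsAdminUser but none of these names, so their implementation here is an assumption; a minimal sketch of one of them, with the others following the same pattern:

from rest_framework.permissions import BasePermission

class IsSuperUser(BasePermission):
    # Assumed semantics: only an authenticated superuser passes the check.
    def has_permission(self, request, view):
        return bool(request.user and request.user.is_authenticated
                    and request.user.is_superuser)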
"step-3": "<mask token>\n\n\nclass AddSchools(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass ViewSchool(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass SchoolDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass SchoolDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass AddBlogs(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass ViewBlog(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass BlogDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass AddEventView(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass ListEventView(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass EventDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass AddBusinessPartners(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass ViewBusinessPartner(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass BusinessPartnerDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass AddKidStory(generics.CreateAPIView):\n permission_classes = IsStudent,\n serializer_class = KidStorySerializer\n queryset = KidStoryModel.objects.all()\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass ViewKidStory(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = KidStorySerializer\n queryset = KidStoryModel.objects.filter(status__exact='P')\n\n\nclass KidStoryDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = KidStorySerializer\n 
queryset = KidStoryModel.objects.filter(status__exact='P')\n\n\nclass KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Get: superadmin can see all stories (draft, published)\n PATCH : superadmin can mark stories as published by changing status = P\n Delete: superadmin can delete stories.\n \"\"\"\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = UpdateKidsStorySerializer\n queryset = KidStoryModel.objects.all()\n\n\nclass AddKidTalent(generics.CreateAPIView):\n permission_classes = IsStudentORClient,\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.all()\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass ViewKidTalent(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.filter(status__exact='P')\n\n\nclass KidTalentDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.filter(status__exact='P')\n\n\nclass KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Get: superadmin can see all kids talent (draft, published)\n PATCH : superadmin can mark kids talent as published by changing status = P\n Delete: superadmin can delete kids talent.\n \"\"\"\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = UpdateKidsTalentSerializer\n queryset = KidTalentModel.objects.all()\n\n\nclass AddCourses(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass ViewCourse(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass CourseDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass AddQuizContext(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass ViewQuizContext(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass QuizContextDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass AddFeedback(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass ViewFeedback(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass FeedbackDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = 
ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass AddWebsiteAd(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass ViewWebsiteAd(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass WebsiteAdDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass AddBusinessPromotion(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass ViewBusinessPromotion(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass BusinessPromotionDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass AddTeam(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass ViewTeam(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass TeamDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass AddAdvisoryBoard(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass ViewAdvisoryBoard(generics.ListAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AdvisoryBoardDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AddAnnouncement(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AnnouncementSerializer\n queryset = 
AnnouncementModel.objects.all()\n\n\nclass ListAnnouncement(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass AnnouncementDetail(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass SuperadminProfileView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n user = get_user_from_token(request)\n data = {'name': user.username, 'email': user.email}\n return Response(data)\n\n\nclass AddJobClassified(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass ViewJobClassified(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass JobClassifiedDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass AddCustomerReviews(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass ViewCustomerReview(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass CustomerReviewDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass ClientComplain(APIView):\n permission_classes = IsSuperUser,\n serializer = ViewComplainSerializer(many=True)\n\n\nclass clientfeedback(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, format=None):\n feeds = ClientFeedBackModel.objects.filter(Class__admin=self.\n request.user)\n serializer = ClientFeedbackSerializer(feeds, many=True)\n return Response(serializer.data)\n\n\nclass Enroll_Course(APIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = EnrollCourseSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass ViewEnroll_Course(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = 
Enroll_CourseModel.objects.filter(course=course, client=client)\n serializer = ViewEnrollCourseSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass DetailEnroll_CourseView(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return Enroll_CourseModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = ViewEnrollCourseSerializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = ViewEnrollCourseSerializer(data, data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CourseDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, slug):\n try:\n return CourseModel.objects.get(slug=slug)\n except CourseModel.DoesNotExist:\n raise Http404\n\n def get(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.classes.school.admin == self.request.user:\n serializer = ViewCourseSerializer(data)\n return Response(serializer.data)\n else:\n return Response({'message':\n 'This course does not belong to your school'}, status=\n status.HTTP_400_BAD_REQUEST)\n\n def put(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.course.client.admin == self.request.user:\n serializer = CourseSerializer(data, data=request.data)\n if serializer.is_valid(raise_exception=True):\n course = serializer.validated_data.get('course', '')\n if course.client.admin == self.request.user:\n serializer.save()\n return Response(serializer.data, status=status.\n HTTP_201_CREATED)\n return Response({'message':\n 'This Class does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n else:\n return Response({'message':\n 'This course does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.course.client.admin == self.request.user:\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response({'message':\n 'This course does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass SchoolRegistrationView(RegisterView):\n serializer_class = RegisterSchoolSerializer\n permission_classes = IsSuperUser,\n\n\nclass Add_question(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = QuestionSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass Viewquestion(generics.ListAPIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n data = QuestionModel.objects.filter(course_id=course)\n serializer = QuestionSerializer(data, many=True)\n return Response(serializer.data, 
status=status.HTTP_200_OK)\n\n\nclass QuestionDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return QuestionModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = QuestionSerializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = QuestionSerializer(data, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SubmittedQuestionView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n admin = self.request.user\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Client_SubmitquestionModel.objects.filter(course__course=\n course, client__client=client)\n serializer = Client_submittedquestionSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass AddonlineTest(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = testSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass ViewOnlinetest(generics.ListAPIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n data = Client_testModel.objects.filter(course_id=course)\n serializer = testSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass onlinetestDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return Client_testModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = testSerializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = testSerializer(data, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SubmittedonlineTestView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n admin = self.request.user\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Client_SubmittestModel.objects.filter(course__course=course,\n client__client=client)\n serializer = Client_submittedtestSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n",
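SuperadminProfileView in the steps above and below calls a get_user_from_token helper that the record never defines. Under DRF's token or session authentication the authenticated user is already attached to the request before the view runs, so a minimal stand-in (a hypothetical helper, not necessarily the record's real one) can simply return it:

def get_user_from_token(request):
    # DRF's authentication classes populate request.user during dispatch,
    # so no manual token lookup is needed in this sketch.
    return request.user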
"step-4": "<mask token>\n\n\nclass AddArticleView(generics.CreateAPIView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ListArticleView(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = ArticleSerializer\n queryset = ArticleModel.objects.filter(status__exact='P')\n\n\nclass ArticleDetail(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = ArticleSerializer\n queryset = ArticleModel.objects.filter(status__exact='P')\n\n\nclass ArticleDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Get: superadmin can see all articles (draft, published)\n PATCH : superadmin can mark article as published by changing status = P\n Delete: superadmin can delete article.\n \"\"\"\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = UpdateArticleSerializer\n queryset = ArticleModel.objects.all()\n\n\nclass AddQuestions(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = QuestionSerializer\n queryset = QuestionModel.objects.all()\n\n\nclass ViewQuestion(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = QuestionSerializer\n queryset = QuestionModel.objects.all()\n\n\nclass QuestionDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = QuestionSerializer\n queryset = QuestionModel.objects.all()\n\n\nclass QuestionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = QuestionSerializer\n queryset = QuestionModel.objects.all()\n\n\nclass AddSchools(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass ViewSchool(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass SchoolDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass SchoolDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass AddBlogs(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass ViewBlog(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass BlogDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass AddEventView(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass ListEventView(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass EventDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = EventSerializer\n queryset = 
EventModel.objects.all()\n\n\nclass EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass AddBusinessPartners(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass ViewBusinessPartner(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass BusinessPartnerDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass AddKidStory(generics.CreateAPIView):\n permission_classes = IsStudent,\n serializer_class = KidStorySerializer\n queryset = KidStoryModel.objects.all()\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass ViewKidStory(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = KidStorySerializer\n queryset = KidStoryModel.objects.filter(status__exact='P')\n\n\nclass KidStoryDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = KidStorySerializer\n queryset = KidStoryModel.objects.filter(status__exact='P')\n\n\nclass KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Get: superadmin can see all stories (draft, published)\n PATCH : superadmin can mark stories as published by changing status = P\n Delete: superadmin can delete stories.\n \"\"\"\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = UpdateKidsStorySerializer\n queryset = KidStoryModel.objects.all()\n\n\nclass AddKidTalent(generics.CreateAPIView):\n permission_classes = IsStudentORClient,\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.all()\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass ViewKidTalent(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.filter(status__exact='P')\n\n\nclass KidTalentDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.filter(status__exact='P')\n\n\nclass KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Get: superadmin can see all kids talent (draft, published)\n PATCH : superadmin can mark kids talent as published by changing status = P\n Delete: superadmin can delete kids talent.\n \"\"\"\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = UpdateKidsTalentSerializer\n queryset = KidTalentModel.objects.all()\n\n\nclass AddCourses(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass ViewCourse(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass 
CourseDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass AddQuizContext(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass ViewQuizContext(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass QuizContextDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass AddFeedback(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass ViewFeedback(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass FeedbackDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass AddWebsiteAd(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass ViewWebsiteAd(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass WebsiteAdDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass AddBusinessPromotion(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass ViewBusinessPromotion(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass BusinessPromotionDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = BusinessPromotionSerializer\n queryset = 
BusinessPromotionModel.objects.all()\n\n\nclass AddTeam(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass ViewTeam(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass TeamDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass AddAdvisoryBoard(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass ViewAdvisoryBoard(generics.ListAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AdvisoryBoardDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AddAnnouncement(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass ListAnnouncement(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass AnnouncementDetail(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass SuperadminProfileView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n user = get_user_from_token(request)\n data = {'name': user.username, 'email': user.email}\n return Response(data)\n\n\nclass AddJobClassified(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass ViewJobClassified(generics.ListAPIView):\n permission_classes = AllowAny,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass JobClassifiedDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = AllowAny,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass AddCustomerReviews(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass 
ViewCustomerReview(generics.ListAPIView):\n permission_classes = IsClient,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass CustomerReviewDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = IsClient,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = IsSuperUser,\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass ClientComplain(APIView):\n permission_classes = IsSuperUser,\n serializer = ViewComplainSerializer(many=True)\n\n\nclass clientfeedback(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, format=None):\n feeds = ClientFeedBackModel.objects.filter(Class__admin=self.\n request.user)\n serializer = ClientFeedbackSerializer(feeds, many=True)\n return Response(serializer.data)\n\n\nclass Enroll_Course(APIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = EnrollCourseSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass ViewEnroll_Course(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Enroll_CourseModel.objects.filter(course=course, client=client)\n serializer = ViewEnrollCourseSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass DetailEnroll_CourseView(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return Enroll_CourseModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = ViewEnrollCourseSerializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = ViewEnrollCourseSerializer(data, data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CourseDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, slug):\n try:\n return CourseModel.objects.get(slug=slug)\n except CourseModel.DoesNotExist:\n raise Http404\n\n def get(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.classes.school.admin == self.request.user:\n serializer = ViewCourseSerializer(data)\n return Response(serializer.data)\n else:\n return Response({'message':\n 'This course does not belong to your school'}, status=\n status.HTTP_400_BAD_REQUEST)\n\n def put(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.course.client.admin == self.request.user:\n serializer = CourseSerializer(data, data=request.data)\n if serializer.is_valid(raise_exception=True):\n course = serializer.validated_data.get('course', '')\n if 
course.client.admin == self.request.user:\n serializer.save()\n return Response(serializer.data, status=status.\n HTTP_201_CREATED)\n return Response({'message':\n 'This Class does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n else:\n return Response({'message':\n 'This course does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.course.client.admin == self.request.user:\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response({'message':\n 'This course does not belong to you'}, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass SchoolRegistrationView(RegisterView):\n serializer_class = RegisterSchoolSerializer\n permission_classes = IsSuperUser,\n\n\nclass Add_question(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = QuestionSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass Viewquestion(generics.ListAPIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n data = QuestionModel.objects.filter(course_id=course)\n serializer = QuestionSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass QuestionDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return QuestionModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = QuestionSerializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = QuestionSerializer(data, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SubmittedQuestionView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n admin = self.request.user\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Client_SubmitquestionModel.objects.filter(course__course=\n course, client__client=client)\n serializer = Client_submittedquestionSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass AddonlineTest(generics.CreateAPIView):\n permission_classes = IsSuperUser,\n\n def post(self, request, format=None):\n serializer = testSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass ViewOnlinetest(generics.ListAPIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n course = 
self.kwargs['course_id']\n data = Client_testModel.objects.filter(course_id=course)\n serializer = testSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass onlinetestDetail(APIView):\n permission_classes = IsSuperUser,\n\n def get_object(self, pk):\n try:\n return Client_testModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = testSerializer(data)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = testSerializer(data, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass SubmittedonlineTestView(APIView):\n permission_classes = IsSuperUser,\n\n def get(self, request, *args, **kwargs):\n admin = self.request.user\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Client_SubmittestModel.objects.filter(course__course=course,\n client__client=client)\n serializer = Client_submittedtestSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n",
"step-5": "import imp\nfrom django.shortcuts import render\n\n# ***************** API ****************\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser,FileUploadParser,MultiPartParser,FormParser\nfrom .models import *\nfrom django.http import Http404\nfrom .serializers import *\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status,viewsets,permissions\nfrom rest_framework import generics\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom django.contrib.auth import get_user_model\nfrom client.models import ClientModel\nfrom adminapp.models import SchoolModel\nfrom adminapp.serializers import SchoolSerializer\n\nfrom .custompermissions import *\nfrom client.permissions import *\nfrom rest_framework.authentication import SessionAuthentication\nfrom Student.permissions import IsStudent\n\nUser = get_user_model()\n\ndef get_user_from_token(request):\n\ttoken = request.user.auth_token #auth key(token) of current user 91391f4c12b94b753d08008150d2315d9d8d7e1e\n\tprint(\"token.user_id\",token.user_id) #gives id of user (pk) 2\n\tuser = User.objects.get(id=token.user_id) #gives user name\n\treturn user\n\n# Create your views here.\n\n# class UserListView(generics.ListAPIView):\n# parser_classes = (MultiPartParser,FormParser)\n# queryset = UserModel.objects.all()\n# serializer_class = UserSerializer\n\n# class UserDetailView(generics.RetrieveAPIView):\n# parser_classes = (MultiPartParser,FormParser)\n# queryset = UserModel.objects.all()\n# serializer_class = UserSerializer\n\nclass AddArticleView(generics.CreateAPIView):\n #All authenticated users can add articles\n permission_classes = (IsAuthenticated, )\n serializer_class = ArticleSerializer\n queryset = ArticleModel.objects.all()\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass ListArticleView(generics.ListAPIView):\n #Anyone can see the published Articles\n permission_classes = (AllowAny, )\n serializer_class = ArticleSerializer\n queryset = ArticleModel.objects.filter(status__exact=\"P\")\n\n\nclass ArticleDetail(generics.RetrieveAPIView):\n #anyone can see detail of published article\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = ArticleSerializer\n queryset = ArticleModel.objects.filter(status__exact=\"P\")\n\n\nclass ArticleDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):\n '''\n Get: superadmin can see all articles (draft, published)\n PATCH : superadmin can mark article as published by changing status = P\n Delete: superadmin can delete article.\n '''\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = UpdateArticleSerializer\n queryset = ArticleModel.objects.all()\n\n\nclass AddQuestions(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = QuestionSerializer\n queryset = QuestionModel.objects.all()\n\nclass ViewQuestion(generics.ListAPIView):\n permission_classes = (IsClient, )\n serializer_class = QuestionSerializer\n queryset = QuestionModel.objects.all()\n\n\nclass QuestionDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (IsClient, )\n serializer_class = QuestionSerializer\n queryset = QuestionModel.objects.all()\n\nclass QuestionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = QuestionSerializer\n queryset = 
QuestionModel.objects.all()\n\n\nclass AddSchools(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\nclass ViewSchool(generics.ListAPIView):\n permission_classes = (IsClient, )\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass SchoolDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (IsClient, )\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\nclass SchoolDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = SchoolSerializer\n queryset = SchoolModel.objects.all()\n\n\nclass AddBlogs(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\nclass ViewBlog(generics.ListAPIView):\n permission_classes = (IsClient, )\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass BlogDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (IsClient, )\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\nclass BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = BlogSerializer\n queryset = BlogModel.objects.all()\n\n\nclass AddEventView(generics.CreateAPIView):\n #only super user can add events\n permission_classes = (IsSuperUser, )\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass ListEventView(generics.ListAPIView):\n #Anyone can see the events\n permission_classes = (AllowAny, )\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\n\nclass EventDetailView(generics.RetrieveAPIView):\n #Anyone can see the detail of events\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\nclass EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n #only superadmin can delete and update events\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = EventSerializer\n queryset = EventModel.objects.all()\n\nclass AddBusinessPartners(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\nclass ViewBusinessPartner(generics.ListAPIView):\n permission_classes = (AllowAny, )\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\n\nclass BusinessPartnerDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\nclass BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = BusinessPartnersSerializer\n queryset = BusinessPartnersModel.objects.all()\n\nclass AddKidStory(generics.CreateAPIView):\n #Students can add kidstory\n permission_classes = (IsStudent, )\n serializer_class = KidStorySerializer\n queryset = KidStoryModel.objects.all()\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\nclass ViewKidStory(generics.ListAPIView):\n # anyone can see published kids story\n permission_classes = (AllowAny, 
)\n serializer_class = KidStorySerializer\n queryset = KidStoryModel.objects.filter(status__exact=\"P\")\n\n\nclass KidStoryDetailView(generics.RetrieveAPIView):\n #anyone can see detail of published kids story\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = KidStorySerializer\n queryset = KidStoryModel.objects.filter(status__exact=\"P\")\n\nclass KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n '''\n Get: superadmin can see all stories (draft, published)\n PATCH : superadmin can mark stories as published by changing status = P\n Delete: superadmin can delete stories.\n '''\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = UpdateKidsStorySerializer\n queryset = KidStoryModel.objects.all()\n\n\nclass AddKidTalent(generics.CreateAPIView):\n #Students or client can add KidsTalent\n permission_classes = (IsStudentORClient, )\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.all()\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\nclass ViewKidTalent(generics.ListAPIView):\n # anyone can see published kids talent\n permission_classes = (AllowAny, )\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.filter(status__exact=\"P\")\n\n\nclass KidTalentDetailView(generics.RetrieveAPIView):\n #anyone can see detail of published kids talent\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = KidTalentSerializer\n queryset = KidTalentModel.objects.filter(status__exact=\"P\")\n\nclass KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n '''\n Get: superadmin can see all kids talent (draft, published)\n PATCH : superadmin can mark kids talent as published by changing status = P\n Delete: superadmin can delete kids talent.\n '''\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = UpdateKidsTalentSerializer\n queryset = KidTalentModel.objects.all()\n\n\nclass AddCourses(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\nclass ViewCourse(generics.ListAPIView):\n permission_classes = (AllowAny, )\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass CourseDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\nclass CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = CourseSerializer\n queryset = CourseModel.objects.all()\n\n\nclass AddQuizContext(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\nclass ViewQuizContext(generics.ListAPIView):\n permission_classes = (IsClient, )\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass QuizContextDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (IsClient, )\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\nclass QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = QuizContextSerializer\n queryset = QuizContextModel.objects.all()\n\n\nclass 
AddFeedback(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\nclass ViewFeedback(generics.ListAPIView):\n permission_classes = (IsClient, )\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass FeedbackDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (IsClient, )\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\nclass FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = ClientFeedbackSerializer\n queryset = ClientFeedBackModel.objects.all()\n\n\nclass AddWebsiteAd(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\nclass ViewWebsiteAd(generics.ListAPIView):\n permission_classes = (AllowAny, )\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\nclass WebsiteAdDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\nclass WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = WebsiteAdSerializer\n queryset = WebsiteAdModel.objects.all()\n\n\n\n\n\n# class AddApproval(generics.CreateAPIView):\n# permission_classes = (IsSuperUser, )\n# serializer_class = ApprovalSerializer\n# queryset = ApprovalModel.objects.all()\n\n# class ViewApproval(generics.ListAPIView):\n# permission_classes = (IsClient, )\n# serializer_class = ApprovalSerializer\n# queryset = ApprovalModel.objects.all()\n\n\n# class ApprovalDetailView(generics.RetrieveAPIView):\n# lookup_field = 'slug'\n# permission_classes = (IsClient, )\n# serializer_class = ApprovalSerializer\n# queryset = ApprovalModel.objects.all()\n\n# class ApprovalDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n# lookup_field = 'slug'\n# permission_classes = (IsSuperUser, )\n# serializer_class = ApprovalSerializer\n# queryset = ApprovalModel.objects.all()\n\n\nclass AddBusinessPromotion(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\nclass ViewBusinessPromotion(generics.ListAPIView):\n permission_classes = (AllowAny, )\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass BusinessPromotionDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\nclass BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = BusinessPromotionSerializer\n queryset = BusinessPromotionModel.objects.all()\n\n\nclass AddTeam(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\nclass ViewTeam(generics.ListAPIView):\n permission_classes = (AllowAny, )\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass TeamDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n 
permission_classes = (AllowAny, )\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\nclass TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = TeamSerializer\n queryset = TeamModel.objects.all()\n\n\nclass AddAdvisoryBoard(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\nclass ViewAdvisoryBoard(generics.ListAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\nclass AdvisoryBoardDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\nclass AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = AdvisoryBoardSerializer\n queryset = AdvisoryBoardModel.objects.all()\n\n\n\nclass AddAnnouncement(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass ListAnnouncement(generics.ListAPIView):\n permission_classes = (AllowAny, )\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass AnnouncementDetail(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\nclass AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = AnnouncementSerializer\n queryset = AnnouncementModel.objects.all()\n\n\nclass SuperadminProfileView(APIView):\n permission_classes = (IsSuperUser, )\n\n def get(self, request, *args, **kwargs):\n user = get_user_from_token(request)\n data = {\n 'name': user.username,\n 'email': user.email\n }\n return Response(data)\n\n\n\nclass AddJobClassified(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\nclass ViewJobClassified(generics.ListAPIView):\n permission_classes = (AllowAny, )\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\nclass JobClassifiedDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (AllowAny, )\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\nclass JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = JobClassifiedSerializer\n queryset = JobClassifiedModel.objects.all()\n\n\n\nclass AddCustomerReviews(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\nclass ViewCustomerReview(generics.ListAPIView):\n permission_classes = (IsClient, )\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\nclass CustomerReviewDetailView(generics.RetrieveAPIView):\n lookup_field = 'slug'\n permission_classes = (IsClient, )\n serializer_class = CustomerReviewSerializer\n queryset = 
CustomerReviewModel.objects.all()\n\nclass CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):\n lookup_field = 'slug'\n permission_classes = (IsSuperUser, )\n serializer_class = CustomerReviewSerializer\n queryset = CustomerReviewModel.objects.all()\n\n\n\nclass ClientComplain(APIView):\n\n permission_classes = (IsSuperUser, )\n serializer = ViewComplainSerializer(many=True)\n\n\nclass clientfeedback(APIView):\n\n permission_classes = (IsSuperUser, )\n\n def get(self, request, format=None):\n feeds = ClientFeedBackModel.objects.filter(\n Class__admin = self.request.user\n )\n serializer = ClientFeedbackSerializer(feeds, many=True)\n return Response(serializer.data)\n\nclass Enroll_Course(APIView):\n permission_classes = (IsSuperUser, )\n def post(self, request, format=None):\n serializer = EnrollCourseSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data,status =status.HTTP_201_CREATED)\n \n else:\n return Response(serializer.errors,status =status.HTTP_400_BAD_REQUEST)\nclass ViewEnroll_Course(APIView):\n permission_classes = (IsSuperUser, )\n \n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Enroll_CourseModel.objects.filter(\n course = course, client = client\n )\n serializer = ViewEnrollCourseSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\nclass DetailEnroll_CourseView(APIView):\n permission_classes = (IsSuperUser, )\n\n def get_object(self,pk):\n try:\n return Enroll_CourseModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self, request, pk, format=None):\n data = self.get_object(pk)\n serializer = ViewEnrollCourseSerializer(data)\n return Response(serializer.data)\n\n def put(self,request,pk,format=None):\n data = self.get_object(pk)\n serializer = ViewEnrollCourseSerializer(data,data = request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n\n def delete(self,request,pk,format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status = status.HTTP_204_NO_CONTENT)\n \n \nclass CourseDetail(APIView):\n permission_classes = (IsSuperUser, )\n\n def get_object(self, slug):\n try:\n return CourseModel.objects.get(slug=slug)\n except CourseModel.DoesNotExist:\n raise Http404\n\n def get(self, request, slug, format=None):\n data = self.get_object(slug)\n if data.classes.school.admin == self.request.user:\n serializer = ViewCourseSerializer(data)\n return Response(serializer.data)\n else:\n return Response(\n {'message':'This course does not belong to your school'}, \n status=status.HTTP_400_BAD_REQUEST\n )\n\n def put(self,request,slug,format=None):\n data = self.get_object(slug)\n if data.course.client.admin == self.request.user:\n serializer = CourseSerializer(data,data = request.data)\n if serializer.is_valid(raise_exception=True):\n course = serializer.validated_data.get('course', '')\n if course.client.admin == self.request.user:\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n return Response(\n {'message':'This Class does not belong to you'}, \n status=status.HTTP_400_BAD_REQUEST\n )\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n 
return Response(\n {'message':'This course does not belong to you'}, \n status=status.HTTP_400_BAD_REQUEST\n )\n\n def delete(self,request,slug,format=None):\n data = self.get_object(slug)\n if data.course.client.admin == self.request.user:\n data.delete()\n return Response(status = status.HTTP_204_NO_CONTENT)\n else:\n return Response(\n {'message':'This course does not belong to you'}, \n status=status.HTTP_400_BAD_REQUEST\n )\n\nclass SchoolRegistrationView(RegisterView):\n serializer_class = RegisterSchoolSerializer\n permission_classes = (IsSuperUser,)\n \nclass Add_question(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n def post(self,request,format=None):\n serializer = QuestionSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data,status =status.HTTP_201_CREATED)\n \n else:\n return Response(serializer.errors,status =status.HTTP_400_BAD_REQUEST)\n\nclass Viewquestion(generics.ListAPIView):\n permission_classes = (IsSuperUser, )\n \n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n data = QuestionModel.objects.filter(\n course_id = course)\n serializer = QuestionSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n\n\nclass QuestionDetail(APIView):\n permission_classes = (IsSuperUser, )\n\n def get_object(self,pk):\n try:\n return QuestionModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self,request,pk,format=None):\n data = self.get_object(pk)\n serializer = QuestionSerializer(data)\n return Response(serializer.data)\n \n\n def put(self,request,pk,format=None):\n data = self.get_object(pk)\n serializer = QuestionSerializer(data,data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n \n \n def delete(self,request,pk,format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status = status.HTTP_204_NO_CONTENT)\nclass SubmittedQuestionView(APIView):\n permission_classes = (IsSuperUser, )\n \n def get(self, request, *args, **kwargs):\n admin = self.request.user\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Client_SubmitquestionModel.objects.filter(\n course__course = course,\n client__client = client\n )\n serializer = Client_submittedquestionSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK) \n\nclass AddonlineTest(generics.CreateAPIView):\n permission_classes = (IsSuperUser, )\n def post(self, request, format=None):\n serializer = testSerializer(data=request.data)\n print(serializer)\n if serializer.is_valid():\n course = serializer.validated_data.get('course', '')\n serializer.save()\n return Response(serializer.data,status =status.HTTP_201_CREATED)\n \n else:\n return Response(serializer.errors,status =status.HTTP_400_BAD_REQUEST)\n\nclass ViewOnlinetest(generics.ListAPIView):\n permission_classes = (IsSuperUser, )\n \n def get(self, request, *args, **kwargs):\n course = self.kwargs['course_id']\n data = Client_testModel.objects.filter(\n course_id = course)\n serializer = testSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n\n\nclass onlinetestDetail(APIView):\n permission_classes = (IsSuperUser, )\n\n def get_object(self,pk):\n try:\n return 
Client_testModel.objects.get(id=pk)\n except:\n raise Http404\n\n def get(self,request,pk,format=None):\n data = self.get_object(pk)\n serializer = testSerializer(data)\n return Response(serializer.data)\n \n\n def put(self,request,pk,format=None):\n data = self.get_object(pk)\n serializer = testSerializer(data,data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n \n \n def delete(self,request,pk,format=None):\n data = self.get_object(pk)\n data.delete()\n return Response(status = status.HTTP_204_NO_CONTENT) \n\nclass SubmittedonlineTestView(APIView):\n permission_classes = (IsSuperUser, )\n \n def get(self, request, *args, **kwargs):\n admin = self.request.user\n course = self.kwargs['course_id']\n client = self.kwargs['client_id']\n data = Client_SubmittestModel.objects.filter(\n course__course = course,\n client__client = client\n )\n serializer = Client_submittedtestSerializer(data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)",
"step-ids": [
182,
186,
190,
206,
212
]
}
|
[
182,
186,
190,
206,
212
] |
<|reserved_special_token_0|>
def get_symbolic_state_probabilities_1222():
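    """
    Steady-state probabilities of the Markov model with
    (num_of_servers, threshold, system_capacity, buffer_capacity) = (1, 2, 2, 2)
    (the name suffix matches these parameters), obtained from the fully
    symbolic solver, together with a matrix whose (u, v) entry is the ratio
    of P(u, v) to the probability of the neighbouring state from which
    (u, v) is reached.
    """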
num_of_servers = 1
threshold = 2
system_capacity = 2
buffer_capacity = 2
sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=
threshold, system_capacity=system_capacity, buffer_capacity=
buffer_capacity)
all_states_1222 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
    sym_state_probs_1222 = [0 for _ in range(len(all_states_1222))]
sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])
sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])
sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])
sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])
sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])
sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1222[0, 0] = 1
sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222
[1] / sym_state_probs_1222[0])
sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222
[2] / sym_state_probs_1222[1])
sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222
[3] / sym_state_probs_1222[2])
sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222
[4] / sym_state_probs_1222[3])
return sym_state_probs_1222, sym_state_recursive_ratios_1222
<|reserved_special_token_0|>
def get_symbolic_state_probabilities_1123():
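    """
    Solve the balance equations of the (1, 1, 2, 3) model symbolically:
    build the symbolic transition matrix Q, replace its last balance
    equation by the normalisation constraint, and solve for the nine state
    probabilities. Three ratio matrices are built from the solution: ratios
    between neighbouring states, ratios taken within each row ("right"),
    and ratios of every state to P(0, 0).
    """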
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 3
Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(
'p00, p01, p11, p21, p31, p02, p12, p22, p32')
pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])
dimension_1123 = Q_sym_1123.shape[0]
M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,
dimension_1123)])
sym_diff_equations_1123 = M_sym_1123 @ pi_1123
b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])
eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])
eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])
eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])
eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])
eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])
eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])
eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])
eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])
eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])
sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,
eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,
p01, p11, p21, p31, p02, p12, p22, p32))
sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1123[0, 0] = 1
sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123
[p01] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123
[p11] / sym_state_probs_1123[p01])
sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123
[p21] / sym_state_probs_1123[p11])
sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123
[p31] / sym_state_probs_1123[p21])
sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123
[p02] / sym_state_probs_1123[p01])
sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123
[p12] / sym_state_probs_1123[p02])
sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123
[p22] / sym_state_probs_1123[p12])
    sym_state_recursive_ratios_1123[3, 2] = sym.factor(sym_state_probs_1123
        [p32] / sym_state_probs_1123[p22])
sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123
.copy())
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p11])
sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p21])
    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p31])
sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1123[0, 0] = 1
sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(
sym_state_probs_1123[p01] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(
sym_state_probs_1123[p11] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(
sym_state_probs_1123[p21] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(
sym_state_probs_1123[p31] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(
sym_state_probs_1123[p02] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(
sym_state_probs_1123[p32] / sym_state_probs_1123[p00])
return (sym_state_probs_1123, sym_state_recursive_ratios_1123,
sym_state_recursive_ratios_right_1123,
sym_state_recursive_ratios_P0_1123)
<|reserved_special_token_0|>
def get_symbolic_state_probabilities_1151():
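    """
    Same construction for the (1, 1, 5, 1) model: the transposed transition
    matrix with its last balance equation replaced by the normalisation row
    is solved for the eleven state probabilities, and the neighbouring,
    row-wise ("right") and P(0, 0)-relative ratio matrices are built from
    the solution.
    """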
num_of_servers = 1
threshold = 1
system_capacity = 5
buffer_capacity = 1
Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')
pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15])
dimension_1151 = Q_sym_1151.shape[0]
M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,
dimension_1151)])
sym_diff_equations_1151 = M_sym_1151 @ pi_1151
b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])
eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])
eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])
eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])
eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])
eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])
eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])
eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])
eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])
eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])
eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])
sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,
eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,
eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,
p05, p15))
sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1151[0, 0] = 1
sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151
[p01] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151
[p11] / sym_state_probs_1151[p01])
sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151
[p02] / sym_state_probs_1151[p01])
sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151
[p12] / sym_state_probs_1151[p02])
sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151
[p03] / sym_state_probs_1151[p02])
sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151
[p13] / sym_state_probs_1151[p03])
sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151
[p04] / sym_state_probs_1151[p03])
sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151
[p14] / sym_state_probs_1151[p04])
sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151
[p05] / sym_state_probs_1151[p04])
sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151
[p15] / sym_state_probs_1151[p05])
sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151
.copy())
sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p11])
sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p12])
sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p13])
sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p14])
sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1151[0, 0] = 1
sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(
sym_state_probs_1151[p01] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(
sym_state_probs_1151[p11] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(
sym_state_probs_1151[p02] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(
sym_state_probs_1151[p03] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(
sym_state_probs_1151[p04] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(
sym_state_probs_1151[p05] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p00])
return (sym_state_probs_1151, sym_state_recursive_ratios_1151,
sym_state_recursive_ratios_right_1151,
sym_state_recursive_ratios_P0_1151)
def get_symbolic_state_probabilities_1161():
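    """
    The (1, 1, 6, 1) model: thirteen states, solved and summarised exactly
    as in the smaller cases above.
    """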
num_of_servers = 1
threshold = 1
system_capacity = 6
buffer_capacity = 1
Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16')
pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16])
dimension_1161 = Q_sym_1161.shape[0]
M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,
dimension_1161)])
sym_diff_equations_1161 = M_sym_1161 @ pi_1161
b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])
eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])
eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])
eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])
eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])
eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])
eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])
eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])
eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])
eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])
eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])
eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])
eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])
sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,
eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,
eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,
p12, p03, p13, p04, p14, p05, p15, p06, p16))
sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1161[0, 0] = 1
sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161
[p01] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161
[p11] / sym_state_probs_1161[p01])
sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161
[p02] / sym_state_probs_1161[p01])
sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161
[p12] / sym_state_probs_1161[p02])
sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161
[p03] / sym_state_probs_1161[p02])
sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161
[p13] / sym_state_probs_1161[p03])
sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161
[p04] / sym_state_probs_1161[p03])
sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161
[p14] / sym_state_probs_1161[p04])
sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161
[p05] / sym_state_probs_1161[p04])
sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161
[p15] / sym_state_probs_1161[p05])
sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161
[p06] / sym_state_probs_1161[p05])
sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161
[p16] / sym_state_probs_1161[p06])
sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161
.copy())
sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p11])
sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p12])
sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p13])
sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p14])
sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p15])
sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1161[0, 0] = 1
sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(
sym_state_probs_1161[p01] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(
sym_state_probs_1161[p11] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(
sym_state_probs_1161[p02] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(
sym_state_probs_1161[p03] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(
sym_state_probs_1161[p04] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(
sym_state_probs_1161[p05] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(
sym_state_probs_1161[p06] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p00])
return (sym_state_probs_1161, sym_state_recursive_ratios_1161,
sym_state_recursive_ratios_right_1161,
sym_state_recursive_ratios_P0_1161)
def get_symbolic_state_probabilities_1171():
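    """
    The (1, 1, 7, 1) model: fifteen states, solved and summarised exactly
    as in the smaller cases above.
    """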
num_of_servers = 1
threshold = 1
system_capacity = 7
buffer_capacity = 1
Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
    (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,
        p17) = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'
    )
pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17])
dimension_1171 = Q_sym_1171.shape[0]
M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,
dimension_1171)])
sym_diff_equations_1171 = M_sym_1171 @ pi_1171
b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])
eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])
eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])
eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])
eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])
eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])
eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])
eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])
eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])
eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])
eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])
eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])
eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])
eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])
eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])
sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,
eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,
eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,
p07, p17))
sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1171[0, 0] = 1
sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171
[p01] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171
[p11] / sym_state_probs_1171[p01])
sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171
[p02] / sym_state_probs_1171[p01])
sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171
[p12] / sym_state_probs_1171[p02])
sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171
[p03] / sym_state_probs_1171[p02])
sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171
[p13] / sym_state_probs_1171[p03])
sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171
[p04] / sym_state_probs_1171[p03])
sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171
[p14] / sym_state_probs_1171[p04])
sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171
[p05] / sym_state_probs_1171[p04])
sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171
[p15] / sym_state_probs_1171[p05])
sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171
[p06] / sym_state_probs_1171[p05])
sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171
[p16] / sym_state_probs_1171[p06])
sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171
[p07] / sym_state_probs_1171[p06])
sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171
[p17] / sym_state_probs_1171[p07])
sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171
.copy())
sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p11])
sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p12])
sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p13])
sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p14])
sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p15])
sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p16])
sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1171[0, 0] = 1
sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(
sym_state_probs_1171[p01] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(
sym_state_probs_1171[p11] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(
sym_state_probs_1171[p02] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(
sym_state_probs_1171[p03] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(
sym_state_probs_1171[p04] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(
sym_state_probs_1171[p05] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(
sym_state_probs_1171[p06] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(
sym_state_probs_1171[p07] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p00])
return (sym_state_probs_1171, sym_state_recursive_ratios_1171,
sym_state_recursive_ratios_right_1171,
sym_state_recursive_ratios_P0_1171)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity
):
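    """
    Solve pi @ Q = 0 together with sum(pi) = 1 symbolically via LU
    decomposition. One placeholder symbol per state (a, ..., g) is
    available, so only models with at most seven states are handled.
    """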
Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=
num_of_servers, threshold=threshold, system_capacity=
system_capacity, buffer_capacity=buffer_capacity)
dimension = Q_sym.shape[0]
if dimension > 7:
return 'Capacity of 6 exceeded'
M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])
b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
system = M_sym.col_insert(dimension, b_sym)
sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])
return sol
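# A usage sketch, assuming the module-level placeholder symbols a, ..., g
# defined above: the solver returns a dict keyed by those symbols in the
# order of abg.markov.build_states, e.g.
#   sol = get_symbolic_pi(
#       num_of_servers=1, threshold=2, system_capacity=2, buffer_capacity=2
#   )
#   sol[a]  # symbolic probability of the first state, (0, 0)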
def get_symbolic_state_probabilities_1222():
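    """
    The (1, 2, 2, 2) model solved through get_symbolic_pi; the ratio matrix
    relates each state's probability to that of the state preceding it on
    the single path through this model's state space.
    """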
num_of_servers = 1
threshold = 2
system_capacity = 2
buffer_capacity = 2
sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=
threshold, system_capacity=system_capacity, buffer_capacity=
buffer_capacity)
all_states_1222 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
    sym_state_probs_1222 = [0 for _ in range(len(all_states_1222))]
sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])
sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])
sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])
sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])
sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])
sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1222[0, 0] = 1
sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222
[1] / sym_state_probs_1222[0])
sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222
[2] / sym_state_probs_1222[1])
sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222
[3] / sym_state_probs_1222[2])
sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222
[4] / sym_state_probs_1222[3])
return sym_state_probs_1222, sym_state_recursive_ratios_1222
<|reserved_special_token_0|>
def get_symbolic_state_probabilities_1122():
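    """
    The (1, 1, 2, 2) model with the (unnormalised) state probabilities
    written out in closed form as polynomials in Lambda, lambda_1, lambda_2
    and mu instead of being obtained from a linear solve; the probabilities
    are normalised by their sum and the same three ratio matrices are built.
    """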
threshold = 1
system_capacity = 2
buffer_capacity = 2
all_states_1122 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
    sym_state_probs_1122 = [0 for _ in range(len(all_states_1122))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +
sym_lambda_2 ** 2 * sym_mu ** 4)
sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *
sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)
sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (
sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *
sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)
sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (
sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *
sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu +
sym_lambda_1 ** 2)
sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (
sym_lambda_2 + sym_mu)
    sym_state_probs_1122[5] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (
        2 * sym_mu + sym_lambda_1 + sym_lambda_2)
sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (
sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *
sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *
sym_lambda_2 * sym_mu)
total_1122 = np.sum(sym_state_probs_1122)
sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]
sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1122[0, 0] = 1
sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122
[1] / sym_state_probs_1122[0])
sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122
[2] / sym_state_probs_1122[1])
sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122
[3] / sym_state_probs_1122[2])
sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122
[4] / sym_state_probs_1122[1])
sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122
[5] / sym_state_probs_1122[4])
sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122
[6] / sym_state_probs_1122[5])
sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122
.copy())
sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[2])
sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[3])
sym_state_recursive_ratios_P0_1122 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1122[0, 0] = 1
sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(
sym_state_probs_1122[1] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(
sym_state_probs_1122[2] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(
sym_state_probs_1122[3] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(
sym_state_probs_1122[4] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[0])
return (sym_state_probs_1122, sym_state_recursive_ratios_1122,
sym_state_recursive_ratios_right_1122,
sym_state_recursive_ratios_P0_1122)
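# Sanity-check sketch (assumed workflow, not in the original source): the
# probabilities above were normalised by their sum, so they should sum to 1
# for any positive parameter values.
def _check_1122_probabilities_sum_to_one():
    probs = get_symbolic_state_probabilities_1122()[0]
    values = {sym.Symbol("Lambda"): 3, sym.Symbol("lambda_1"): 2,
              sym.Symbol("lambda_2"): 1, sym.Symbol("mu"): 5}
    total = sum(p.subs(values) for p in probs)
    assert sym.simplify(total - 1) == 0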
def get_symbolic_state_probabilities_1123():
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 3
Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(
'p00, p01, p11, p21, p31, p02, p12, p22, p32')
pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])
dimension_1123 = Q_sym_1123.shape[0]
M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,
dimension_1123)])
sym_diff_equations_1123 = M_sym_1123 @ pi_1123
b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])
eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])
eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])
eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])
eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])
eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])
eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])
eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])
eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])
eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])
sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,
eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,
p01, p11, p21, p31, p02, p12, p22, p32))
sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1123[0, 0] = 1
sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123
[p01] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123
[p11] / sym_state_probs_1123[p01])
sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123
[p21] / sym_state_probs_1123[p11])
sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123
[p31] / sym_state_probs_1123[p21])
sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123
[p02] / sym_state_probs_1123[p01])
sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123
[p12] / sym_state_probs_1123[p02])
sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123
[p22] / sym_state_probs_1123[p12])
    sym_state_recursive_ratios_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p22])
sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123
.copy())
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p11])
sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p21])
sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(
sym_state_probs_1123[p32] / sym_state_probs_1123[p22])
sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1123[0, 0] = 1
sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(
sym_state_probs_1123[p01] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(
sym_state_probs_1123[p11] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(
sym_state_probs_1123[p21] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(
sym_state_probs_1123[p31] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(
sym_state_probs_1123[p02] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(
sym_state_probs_1123[p32] / sym_state_probs_1123[p00])
return (sym_state_probs_1123, sym_state_recursive_ratios_1123,
sym_state_recursive_ratios_right_1123,
sym_state_recursive_ratios_P0_1123)
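# The function above spells out M @ pi = b one sym.Eq at a time.  A more
# compact (hypothetical) equivalent would hand the matrices to sympy's
# linear solver directly:
#
#     sym.linsolve((M_sym_1123, b_sym_1123),
#                  [p00, p01, p11, p21, p31, p02, p12, p22, p32])
#
# which yields the same steady-state vector without enumerating eq0..eq8.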
def get_symbolic_state_probabilities_1341():
threshold = 3
system_capacity = 4
buffer_capacity = 1
all_states_1341 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6
    sym_state_probs_1341[1] = sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_mu ** 5
sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +
sym_Lambda ** 2 * sym_mu ** 4)
sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +
sym_Lambda ** 3 * sym_mu ** 3)
    sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu
        + sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2
        + sym_Lambda ** 3 * sym_lambda_2 ** 2 * sym_mu)
sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2
sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *
sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +
2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)
total_1341 = np.sum(sym_state_probs_1341)
sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]
sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1341[0, 0] = 1
sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341
[1] / sym_state_probs_1341[0])
sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341
[2] / sym_state_probs_1341[1])
sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341
[3] / sym_state_probs_1341[2])
sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341
[5] / sym_state_probs_1341[3])
sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341
[4] / sym_state_probs_1341[3])
sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341
[6] / sym_state_probs_1341[5])
sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341
.copy())
sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[4])
sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1341[0, 0] = 1
sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(
sym_state_probs_1341[1] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(
sym_state_probs_1341[2] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(
sym_state_probs_1341[3] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(
sym_state_probs_1341[4] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(
sym_state_probs_1341[5] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[0])
return (sym_state_probs_1341, sym_state_recursive_ratios_1341,
sym_state_recursive_ratios_right_1341,
sym_state_recursive_ratios_P0_1341)
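# Example (assumed usage): substituting concrete rates turns a symbolic ratio
# into a number, e.g. the ratio P(1, 3) / P(0, 3):
#
#     probs, ratios, *_ = get_symbolic_state_probabilities_1341()
#     ratios[1, 3].subs({"Lambda": 2, "lambda_1": 1, "lambda_2": 1, "mu": 3})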
def get_symbolic_state_probabilities_1131():
threshold = 1
system_capacity = 3
buffer_capacity = 1
all_states_1131 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu **
5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *
sym_mu ** 4)
sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu
sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *
sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *
sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 *
sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **
2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 +
sym_Lambda * sym_lambda_2 * sym_mu ** 4)
sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (
sym_lambda_2 + sym_mu)
    sym_state_probs_1131[4] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (
        sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 * sym_lambda_2 * sym_mu
        + sym_lambda_1 ** 2 + 2 * sym_lambda_1 * sym_mu + 2 * sym_mu ** 2)
sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3
sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (
sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *
sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *
sym_mu + 3 * sym_mu ** 2)
denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 +
sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *
sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 **
2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 *
sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *
sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 *
sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *
sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **
2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 +
sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 +
sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 *
sym_mu ** 5 + sym_mu ** 6)
sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]
sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1131[0, 0] = 1
sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131
[1] / sym_state_probs_1131[0])
sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131
[2] / sym_state_probs_1131[1])
sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131
[3] / sym_state_probs_1131[1])
sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131
[4] / sym_state_probs_1131[3])
sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131
[5] / sym_state_probs_1131[3])
sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131
[6] / sym_state_probs_1131[5])
sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131
.copy())
sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[2])
sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[4])
sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1131[0, 0] = 1
sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(
sym_state_probs_1131[1] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(
sym_state_probs_1131[2] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(
sym_state_probs_1131[3] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(
sym_state_probs_1131[5] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[0])
return (sym_state_probs_1131, sym_state_recursive_ratios_1131,
sym_state_recursive_ratios_right_1131,
sym_state_recursive_ratios_P0_1131)
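# Cross-check sketch (assumption: the hand-expanded denominator above equals
# the sum of the unnormalised numerators, as np.sum computes in the sibling
# functions):
#
#     probs = get_symbolic_state_probabilities_1131()[0]
#     sym.simplify(sum(probs) - 1)  # -> 0 if the expansion is correct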
def get_symbolic_state_probabilities_1132():
num_of_servers = 1
threshold = 1
system_capacity = 3
buffer_capacity = 2
Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(
'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')
pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])
dimension_1132 = Q_sym_1132.shape[0]
M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,
dimension_1132)])
sym_diff_equations_1132 = M_sym_1132 @ pi_1132
b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])
eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])
eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])
eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])
eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])
eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])
eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])
eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])
eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])
eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])
sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,
eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,
eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))
sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1132[0, 0] = 1
sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132
[p01] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132
[p11] / sym_state_probs_1132[p01])
sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132
[p21] / sym_state_probs_1132[p11])
sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132
[p02] / sym_state_probs_1132[p01])
sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132
[p12] / sym_state_probs_1132[p02])
sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132
[p22] / sym_state_probs_1132[p12])
sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132
[p03] / sym_state_probs_1132[p02])
sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132
[p13] / sym_state_probs_1132[p03])
sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132
[p23] / sym_state_probs_1132[p13])
sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132
.copy())
sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p11])
sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p12])
sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p21])
sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p22])
sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1132[0, 0] = 1
sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(
sym_state_probs_1132[p01] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(
sym_state_probs_1132[p11] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(
sym_state_probs_1132[p21] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(
sym_state_probs_1132[p02] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(
sym_state_probs_1132[p03] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p00])
return (sym_state_probs_1132, sym_state_recursive_ratios_1132,
sym_state_recursive_ratios_right_1132,
sym_state_recursive_ratios_P0_1132)
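# All get_symbolic_state_probabilities_* functions above follow one recipe:
# build the symbolic transition matrix, replace its last balance equation
# with the normalisation row, and solve M @ pi = b.  A generic sketch of that
# recipe (hypothetical helper, not part of the original code; assumes
# ``import sympy as sym`` and ``import ambulance_game as abg``):
def get_symbolic_state_probabilities_sketch(num_of_servers, threshold,
                                            system_capacity, buffer_capacity):
    Q_sym = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity)
    dimension = Q_sym.shape[0]
    pi = sym.Matrix(sym.symbols(f"p0:{dimension}"))  # one symbol per state
    M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])
    b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
    return sym.solve(M_sym @ pi - b_sym, list(pi))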
<mask token>
def get_symbolic_state_probabilities_1151():
num_of_servers = 1
threshold = 1
system_capacity = 5
buffer_capacity = 1
Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')
pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15])
dimension_1151 = Q_sym_1151.shape[0]
M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,
dimension_1151)])
sym_diff_equations_1151 = M_sym_1151 @ pi_1151
b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])
eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])
eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])
eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])
eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])
eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])
eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])
eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])
eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])
eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])
eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])
sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,
eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,
eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,
p05, p15))
sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1151[0, 0] = 1
sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151
[p01] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151
[p11] / sym_state_probs_1151[p01])
sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151
[p02] / sym_state_probs_1151[p01])
sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151
[p12] / sym_state_probs_1151[p02])
sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151
[p03] / sym_state_probs_1151[p02])
sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151
[p13] / sym_state_probs_1151[p03])
sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151
[p04] / sym_state_probs_1151[p03])
sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151
[p14] / sym_state_probs_1151[p04])
sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151
[p05] / sym_state_probs_1151[p04])
sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151
[p15] / sym_state_probs_1151[p05])
sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151
.copy())
sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p11])
sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p12])
sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p13])
sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p14])
sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1151[0, 0] = 1
sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(
sym_state_probs_1151[p01] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(
sym_state_probs_1151[p11] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(
sym_state_probs_1151[p02] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(
sym_state_probs_1151[p03] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(
sym_state_probs_1151[p04] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(
sym_state_probs_1151[p05] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p00])
return (sym_state_probs_1151, sym_state_recursive_ratios_1151,
sym_state_recursive_ratios_right_1151,
sym_state_recursive_ratios_P0_1151)
def get_symbolic_state_probabilities_1161():
num_of_servers = 1
threshold = 1
system_capacity = 6
buffer_capacity = 1
Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16')
pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16])
dimension_1161 = Q_sym_1161.shape[0]
M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,
dimension_1161)])
sym_diff_equations_1161 = M_sym_1161 @ pi_1161
b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])
eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])
eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])
eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])
eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])
eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])
eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])
eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])
eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])
eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])
eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])
eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])
eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])
sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,
eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,
eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,
p12, p03, p13, p04, p14, p05, p15, p06, p16))
sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1161[0, 0] = 1
sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161
[p01] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161
[p11] / sym_state_probs_1161[p01])
sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161
[p02] / sym_state_probs_1161[p01])
sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161
[p12] / sym_state_probs_1161[p02])
sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161
[p03] / sym_state_probs_1161[p02])
sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161
[p13] / sym_state_probs_1161[p03])
sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161
[p04] / sym_state_probs_1161[p03])
sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161
[p14] / sym_state_probs_1161[p04])
sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161
[p05] / sym_state_probs_1161[p04])
sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161
[p15] / sym_state_probs_1161[p05])
sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161
[p06] / sym_state_probs_1161[p05])
sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161
[p16] / sym_state_probs_1161[p06])
sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161
.copy())
sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p11])
sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p12])
sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p13])
sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p14])
sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p15])
sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1161[0, 0] = 1
sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(
sym_state_probs_1161[p01] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(
sym_state_probs_1161[p11] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(
sym_state_probs_1161[p02] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(
sym_state_probs_1161[p03] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(
sym_state_probs_1161[p04] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(
sym_state_probs_1161[p05] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(
sym_state_probs_1161[p06] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p00])
return (sym_state_probs_1161, sym_state_recursive_ratios_1161,
sym_state_recursive_ratios_right_1161,
sym_state_recursive_ratios_P0_1161)
def get_symbolic_state_probabilities_1171():
num_of_servers = 1
threshold = 1
system_capacity = 7
buffer_capacity = 1
Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
    (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,
        p17) = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17')
pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17])
dimension_1171 = Q_sym_1171.shape[0]
M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,
dimension_1171)])
sym_diff_equations_1171 = M_sym_1171 @ pi_1171
b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])
eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])
eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])
eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])
eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])
eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])
eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])
eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])
eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])
eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])
eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])
eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])
eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])
eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])
eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])
sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,
eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,
eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,
p07, p17))
sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1171[0, 0] = 1
sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171
[p01] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171
[p11] / sym_state_probs_1171[p01])
sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171
[p02] / sym_state_probs_1171[p01])
sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171
[p12] / sym_state_probs_1171[p02])
sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171
[p03] / sym_state_probs_1171[p02])
sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171
[p13] / sym_state_probs_1171[p03])
sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171
[p04] / sym_state_probs_1171[p03])
sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171
[p14] / sym_state_probs_1171[p04])
sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171
[p05] / sym_state_probs_1171[p04])
sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171
[p15] / sym_state_probs_1171[p05])
sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171
[p06] / sym_state_probs_1171[p05])
sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171
[p16] / sym_state_probs_1171[p06])
sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171
[p07] / sym_state_probs_1171[p06])
sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171
[p17] / sym_state_probs_1171[p07])
sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171
.copy())
sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p11])
sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p12])
sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p13])
sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p14])
sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p15])
sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p16])
sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1171[0, 0] = 1
sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(
sym_state_probs_1171[p01] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(
sym_state_probs_1171[p11] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(
sym_state_probs_1171[p02] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(
sym_state_probs_1171[p03] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(
sym_state_probs_1171[p04] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(
sym_state_probs_1171[p05] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(
sym_state_probs_1171[p06] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(
sym_state_probs_1171[p07] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p00])
return (sym_state_probs_1171, sym_state_recursive_ratios_1171,
sym_state_recursive_ratios_right_1171,
sym_state_recursive_ratios_P0_1171)
def get_symbolic_state_probabilities_1181():
num_of_servers = 1
threshold = 1
system_capacity = 8
buffer_capacity = 1
Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
    (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,
        p17, p08, p18) = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18')
pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17, p08, p18])
dimension_1181 = Q_sym_1181.shape[0]
M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,
dimension_1181)])
sym_diff_equations_1181 = M_sym_1181 @ pi_1181
b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])
eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])
eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])
eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])
eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])
eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])
eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])
eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])
eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])
eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])
eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])
eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])
eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])
eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])
eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])
eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])
eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])
eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])
sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,
eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,
eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,
eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,
p05, p15, p06, p16, p07, p17, p08, p18))
sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1181[0, 0] = 1
sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181
[p01] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181
[p11] / sym_state_probs_1181[p01])
sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181
[p02] / sym_state_probs_1181[p01])
sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181
[p12] / sym_state_probs_1181[p02])
sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181
[p03] / sym_state_probs_1181[p02])
sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181
[p13] / sym_state_probs_1181[p03])
sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181
[p04] / sym_state_probs_1181[p03])
sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181
[p14] / sym_state_probs_1181[p04])
sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181
[p05] / sym_state_probs_1181[p04])
sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181
[p15] / sym_state_probs_1181[p05])
sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181
[p06] / sym_state_probs_1181[p05])
sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181
[p16] / sym_state_probs_1181[p06])
sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181
[p07] / sym_state_probs_1181[p06])
sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181
[p17] / sym_state_probs_1181[p07])
sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181
[p08] / sym_state_probs_1181[p07])
sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181
[p18] / sym_state_probs_1181[p08])
sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181
.copy())
sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p11])
sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p12])
sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p13])
sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p14])
sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p15])
sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p16])
sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p17])
sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1181[0, 0] = 1
sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p00])
return (sym_state_probs_1181, sym_state_recursive_ratios_1181,
sym_state_recursive_ratios_right_1181,
sym_state_recursive_ratios_P0_1181)
def get_symbolic_state_probabilities_1191():
num_of_servers = 1
threshold = 1
system_capacity = 9
buffer_capacity = 1
Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
    (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,
        p17, p08, p18, p09, p19) = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19')
pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17, p08, p18, p09, p19])
dimension_1191 = Q_sym_1191.shape[0]
M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,
dimension_1191)])
sym_diff_equations_1191 = M_sym_1191 @ pi_1191
b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])
eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])
eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])
eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])
eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])
eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])
eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])
eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])
eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])
eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])
eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])
eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])
eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])
eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])
eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])
eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])
eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])
eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])
eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])
eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])
sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,
eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,
eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,
eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,
p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,
p09, p19))
sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1191[0, 0] = 1
sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191
[p01] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191
[p11] / sym_state_probs_1191[p01])
sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191
[p02] / sym_state_probs_1191[p01])
sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191
[p12] / sym_state_probs_1191[p02])
sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191
[p03] / sym_state_probs_1191[p02])
sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191
[p13] / sym_state_probs_1191[p03])
sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191
[p04] / sym_state_probs_1191[p03])
sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191
[p14] / sym_state_probs_1191[p04])
sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191
[p05] / sym_state_probs_1191[p04])
sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191
[p15] / sym_state_probs_1191[p05])
sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191
[p06] / sym_state_probs_1191[p05])
sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191
[p16] / sym_state_probs_1191[p06])
sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191
[p07] / sym_state_probs_1191[p06])
sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191
[p17] / sym_state_probs_1191[p07])
sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191
[p08] / sym_state_probs_1191[p07])
sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191
[p18] / sym_state_probs_1191[p08])
sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191
[p09] / sym_state_probs_1191[p08])
sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191
[p19] / sym_state_probs_1191[p09])
sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191
.copy())
sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p11])
sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p12])
sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p13])
sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p14])
sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p15])
sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p16])
sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p17])
    sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(
        sym_state_probs_1191[p19] / sym_state_probs_1191[p18])
sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1191[0, 0] = 1
sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(
sym_state_probs_1191[p01] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(
sym_state_probs_1191[p11] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(
sym_state_probs_1191[p02] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(
sym_state_probs_1191[p03] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(
sym_state_probs_1191[p04] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(
sym_state_probs_1191[p05] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(
sym_state_probs_1191[p06] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(
sym_state_probs_1191[p07] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(
sym_state_probs_1191[p08] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(
sym_state_probs_1191[p09] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(
sym_state_probs_1191[p19] / sym_state_probs_1191[p00])
return (sym_state_probs_1191, sym_state_recursive_ratios_1191,
sym_state_recursive_ratios_right_1191,
sym_state_recursive_ratios_P0_1191)
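# The 1151-1191 helpers above are identical up to system_capacity.  A
# hypothetical parameterised sweep, reusing the generic sketch defined after
# get_symbolic_state_probabilities_1132, would be:
#
#     for capacity in range(5, 10):
#         probs = get_symbolic_state_probabilities_sketch(
#             num_of_servers=1, threshold=1,
#             system_capacity=capacity, buffer_capacity=1)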
<mask token>
<mask token>
def get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity
):
Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=
num_of_servers, threshold=threshold, system_capacity=
system_capacity, buffer_capacity=buffer_capacity)
dimension = Q_sym.shape[0]
    # Only 7 placeholder symbols (a-g) are available below, so chains with
    # more than 7 states cannot be solved by this helper.
    if dimension > 7:
        return 'Capacity of 6 exceeded'
M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])
b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
system = M_sym.col_insert(dimension, b_sym)
sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])
return sol
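# Usage sketch (assumptions: ``import sympy as sym``, ``import numpy as np``
# and ``import ambulance_game as abg`` are in scope, and the placeholder
# symbols used above are defined at module level, e.g.
# ``a, b, c, d, e, f, g = sym.symbols("a, b, c, d, e, f, g")``):
#
#     sol = get_symbolic_pi(
#         num_of_servers=1, threshold=2, system_capacity=2, buffer_capacity=2
#     )
#     sol[a]  # symbolic steady-state probability of the first state
#
# The solution maps one placeholder symbol to each state, in the order
# returned by abg.markov.build_states().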
def get_symbolic_state_probabilities_1222():
num_of_servers = 1
threshold = 2
system_capacity = 2
buffer_capacity = 2
sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=
threshold, system_capacity=system_capacity, buffer_capacity=
buffer_capacity)
all_states_1222 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]
sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])
sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])
sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])
sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])
sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])
sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1222[0, 0] = 1
sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222
[1] / sym_state_probs_1222[0])
sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222
[2] / sym_state_probs_1222[1])
sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222
[3] / sym_state_probs_1222[2])
sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222
[4] / sym_state_probs_1222[3])
return sym_state_probs_1222, sym_state_recursive_ratios_1222
def get_symbolic_state_probabilities_1121():
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 1
all_states_1121 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_pi_1121 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=
threshold, system_capacity=system_capacity, buffer_capacity=
buffer_capacity)
sym_state_probs_1121 = [(0) for _ in range(len(all_states_1121))]
sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])
sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])
sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])
sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])
sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])
sym_state_recursive_ratios_1121 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1121[0, 0] = 1
sym_state_recursive_ratios_1121[0, 1] = sym.factor(sym_state_probs_1121
[1] / sym_state_probs_1121[0])
sym_state_recursive_ratios_1121[1, 1] = sym.factor(sym_state_probs_1121
[2] / sym_state_probs_1121[1])
sym_state_recursive_ratios_1121[0, 2] = sym.factor(sym_state_probs_1121
[3] / sym_state_probs_1121[1])
sym_state_recursive_ratios_1121[1, 2] = sym.factor(sym_state_probs_1121
[4] / sym_state_probs_1121[3])
sym_state_recursive_ratios_right_1121 = (sym_state_recursive_ratios_1121
.copy())
sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[2])
sym_state_recursive_ratios_P0_1121 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1121[0, 0] = 1
sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(
sym_state_probs_1121[1] / sym_state_probs_1121[0])
sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(
sym_state_probs_1121[2] / sym_state_probs_1121[0])
sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(
sym_state_probs_1121[3] / sym_state_probs_1121[0])
sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[0])
return (sym_state_probs_1121, sym_state_recursive_ratios_1121,
sym_state_recursive_ratios_right_1121,
sym_state_recursive_ratios_P0_1121)
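# Reading of the three ratio matrices, as inferred from the code above:
#   sym_state_recursive_ratios_*       -- row-0 states relative to their left
#                                         neighbour; buffer states relative to
#                                         the state directly above them
#   sym_state_recursive_ratios_right_* -- buffer states relative to their left
#                                         neighbour instead
#   sym_state_recursive_ratios_P0_*    -- every state relative to P(0, 0)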
def get_symbolic_state_probabilities_1122():
threshold = 1
system_capacity = 2
buffer_capacity = 2
all_states_1122 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +
sym_lambda_2 ** 2 * sym_mu ** 4)
sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *
sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)
sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (
sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *
sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)
sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (
sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *
sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu +
sym_lambda_1 ** 2)
sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (
sym_lambda_2 + sym_mu)
    sym_state_probs_1122[5] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (
        2 * sym_mu + sym_lambda_1 + sym_lambda_2)
sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (
sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *
sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *
sym_lambda_2 * sym_mu)
total_1122 = np.sum(sym_state_probs_1122)
sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]
sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1122[0, 0] = 1
sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122
[1] / sym_state_probs_1122[0])
sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122
[2] / sym_state_probs_1122[1])
sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122
[3] / sym_state_probs_1122[2])
sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122
[4] / sym_state_probs_1122[1])
sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122
[5] / sym_state_probs_1122[4])
sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122
[6] / sym_state_probs_1122[5])
sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122
.copy())
sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[2])
sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[3])
sym_state_recursive_ratios_P0_1122 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1122[0, 0] = 1
sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(
sym_state_probs_1122[1] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(
sym_state_probs_1122[2] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(
sym_state_probs_1122[3] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(
sym_state_probs_1122[4] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[0])
return (sym_state_probs_1122, sym_state_recursive_ratios_1122,
sym_state_recursive_ratios_right_1122,
sym_state_recursive_ratios_P0_1122)
def get_symbolic_state_probabilities_1123():
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 3
Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(
'p00, p01, p11, p21, p31, p02, p12, p22, p32')
pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])
dimension_1123 = Q_sym_1123.shape[0]
M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,
dimension_1123)])
sym_diff_equations_1123 = M_sym_1123 @ pi_1123
b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])
eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])
eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])
eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])
eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])
eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])
eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])
eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])
eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])
eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])
sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,
eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,
p01, p11, p21, p31, p02, p12, p22, p32))
sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1123[0, 0] = 1
sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123
[p01] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123
[p11] / sym_state_probs_1123[p01])
sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123
[p21] / sym_state_probs_1123[p11])
sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123
[p31] / sym_state_probs_1123[p21])
sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123
[p02] / sym_state_probs_1123[p01])
sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123
[p12] / sym_state_probs_1123[p02])
sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123
[p22] / sym_state_probs_1123[p12])
    sym_state_recursive_ratios_1123[3, 2] = sym.factor(sym_state_probs_1123
        [p32] / sym_state_probs_1123[p22])
sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123
.copy())
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p11])
sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p21])
    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p31])
sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1123[0, 0] = 1
sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(
sym_state_probs_1123[p01] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(
sym_state_probs_1123[p11] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(
sym_state_probs_1123[p21] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(
sym_state_probs_1123[p31] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(
sym_state_probs_1123[p02] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(
sym_state_probs_1123[p32] / sym_state_probs_1123[p00])
return (sym_state_probs_1123, sym_state_recursive_ratios_1123,
sym_state_recursive_ratios_right_1123,
sym_state_recursive_ratios_P0_1123)
def get_symbolic_state_probabilities_1341():
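    """Return the symbolic steady-state probabilities of the model with
    threshold=3, system_capacity=4 and buffer_capacity=1, built from
    closed-form (unnormalised) expressions for each state, along with
    the three recursive-ratio matrices.
    """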
threshold = 3
system_capacity = 4
buffer_capacity = 1
all_states_1341 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6
    sym_state_probs_1341[1] = (sym_Lambda * sym_lambda_2 * sym_mu ** 4 +
        sym_Lambda * sym_mu ** 5)
sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +
sym_Lambda ** 2 * sym_mu ** 4)
sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +
sym_Lambda ** 3 * sym_mu ** 3)
sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *
sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu **
        2 + sym_Lambda ** 3 * sym_lambda_2 ** 2 * sym_mu)
sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2
sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *
sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +
2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)
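    # Normalise the closed-form expressions so the probabilities sum to one.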
total_1341 = np.sum(sym_state_probs_1341)
sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]
sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1341[0, 0] = 1
sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341
[1] / sym_state_probs_1341[0])
sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341
[2] / sym_state_probs_1341[1])
sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341
[3] / sym_state_probs_1341[2])
sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341
[5] / sym_state_probs_1341[3])
sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341
[4] / sym_state_probs_1341[3])
sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341
[6] / sym_state_probs_1341[5])
sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341
.copy())
sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[4])
sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1341[0, 0] = 1
sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(
sym_state_probs_1341[1] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(
sym_state_probs_1341[2] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(
sym_state_probs_1341[3] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(
sym_state_probs_1341[4] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(
sym_state_probs_1341[5] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[0])
return (sym_state_probs_1341, sym_state_recursive_ratios_1341,
sym_state_recursive_ratios_right_1341,
sym_state_recursive_ratios_P0_1341)
def get_symbolic_state_probabilities_1131():
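    """Return the symbolic steady-state probabilities of the model with
    threshold=1, system_capacity=3 and buffer_capacity=1, built from
    closed-form expressions over a common denominator, along with the
    three recursive-ratio matrices.
    """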
threshold = 1
system_capacity = 3
buffer_capacity = 1
all_states_1131 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu **
5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *
sym_mu ** 4)
sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu
sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *
sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *
sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 *
sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **
2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 +
sym_Lambda * sym_lambda_2 * sym_mu ** 4)
sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (
sym_lambda_2 + sym_mu)
    sym_state_probs_1131[4] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (
sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *
sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *
sym_mu + 2 * sym_mu ** 2)
sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3
sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (
sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *
sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *
sym_mu + 3 * sym_mu ** 2)
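    # The normalising constant: the sum of all unnormalised state probabilities.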
denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 +
sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *
sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 **
2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 *
sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *
sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 *
sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *
sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **
2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 +
sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 +
sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 *
sym_mu ** 5 + sym_mu ** 6)
sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]
sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1131[0, 0] = 1
sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131
[1] / sym_state_probs_1131[0])
sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131
[2] / sym_state_probs_1131[1])
sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131
[3] / sym_state_probs_1131[1])
sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131
[4] / sym_state_probs_1131[3])
sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131
[5] / sym_state_probs_1131[3])
sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131
[6] / sym_state_probs_1131[5])
sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131
.copy())
sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[2])
sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[4])
sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1131[0, 0] = 1
sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(
sym_state_probs_1131[1] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(
sym_state_probs_1131[2] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(
sym_state_probs_1131[3] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(
sym_state_probs_1131[5] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[0])
return (sym_state_probs_1131, sym_state_recursive_ratios_1131,
sym_state_recursive_ratios_right_1131,
sym_state_recursive_ratios_P0_1131)
def get_symbolic_state_probabilities_1132():
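    """Return the symbolic steady-state probabilities of the model with
    num_of_servers=1, threshold=1, system_capacity=3 and buffer_capacity=2,
    found by solving the balance equations with sympy.solve, along with
    the three recursive-ratio matrices.
    """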
num_of_servers = 1
threshold = 1
system_capacity = 3
buffer_capacity = 2
Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(
'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')
pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])
dimension_1132 = Q_sym_1132.shape[0]
M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,
dimension_1132)])
sym_diff_equations_1132 = M_sym_1132 @ pi_1132
b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])
eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])
eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])
eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])
eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])
eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])
eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])
eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])
eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])
eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])
sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,
eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,
eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))
sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1132[0, 0] = 1
sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132
[p01] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132
[p11] / sym_state_probs_1132[p01])
sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132
[p21] / sym_state_probs_1132[p11])
sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132
[p02] / sym_state_probs_1132[p01])
sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132
[p12] / sym_state_probs_1132[p02])
sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132
[p22] / sym_state_probs_1132[p12])
sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132
[p03] / sym_state_probs_1132[p02])
sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132
[p13] / sym_state_probs_1132[p03])
sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132
[p23] / sym_state_probs_1132[p13])
sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132
.copy())
sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p11])
sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p12])
sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p21])
sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p22])
sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1132[0, 0] = 1
sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(
sym_state_probs_1132[p01] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(
sym_state_probs_1132[p11] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(
sym_state_probs_1132[p21] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(
sym_state_probs_1132[p02] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(
sym_state_probs_1132[p03] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p00])
return (sym_state_probs_1132, sym_state_recursive_ratios_1132,
sym_state_recursive_ratios_right_1132,
sym_state_recursive_ratios_P0_1132)
def get_symbolic_state_probabilities_1151():
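    """Return the symbolic steady-state probabilities of the model with
    num_of_servers=1, threshold=1, system_capacity=5 and buffer_capacity=1,
    found by solving the balance equations with sympy.solve, along with
    the three recursive-ratio matrices.
    """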
num_of_servers = 1
threshold = 1
system_capacity = 5
buffer_capacity = 1
Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')
pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15])
dimension_1151 = Q_sym_1151.shape[0]
M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,
dimension_1151)])
sym_diff_equations_1151 = M_sym_1151 @ pi_1151
b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])
eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])
eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])
eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])
eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])
eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])
eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])
eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])
eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])
eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])
eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])
sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,
eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,
eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,
p05, p15))
sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1151[0, 0] = 1
sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151
[p01] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151
[p11] / sym_state_probs_1151[p01])
sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151
[p02] / sym_state_probs_1151[p01])
sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151
[p12] / sym_state_probs_1151[p02])
sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151
[p03] / sym_state_probs_1151[p02])
sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151
[p13] / sym_state_probs_1151[p03])
sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151
[p04] / sym_state_probs_1151[p03])
sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151
[p14] / sym_state_probs_1151[p04])
sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151
[p05] / sym_state_probs_1151[p04])
sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151
[p15] / sym_state_probs_1151[p05])
sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151
.copy())
sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p11])
sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p12])
sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p13])
sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p14])
sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1151[0, 0] = 1
sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(
sym_state_probs_1151[p01] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(
sym_state_probs_1151[p11] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(
sym_state_probs_1151[p02] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(
sym_state_probs_1151[p03] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(
sym_state_probs_1151[p04] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(
sym_state_probs_1151[p05] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p00])
return (sym_state_probs_1151, sym_state_recursive_ratios_1151,
sym_state_recursive_ratios_right_1151,
sym_state_recursive_ratios_P0_1151)
def get_symbolic_state_probabilities_1161():
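    """Same as get_symbolic_state_probabilities_1151 but for
    system_capacity=6.
    """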
num_of_servers = 1
threshold = 1
system_capacity = 6
buffer_capacity = 1
Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.
symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))
pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16])
dimension_1161 = Q_sym_1161.shape[0]
M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,
dimension_1161)])
sym_diff_equations_1161 = M_sym_1161 @ pi_1161
b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])
eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])
eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])
eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])
eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])
eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])
eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])
eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])
eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])
eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])
eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])
eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])
eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])
sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,
eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,
eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,
p12, p03, p13, p04, p14, p05, p15, p06, p16))
sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1161[0, 0] = 1
sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161
[p01] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161
[p11] / sym_state_probs_1161[p01])
sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161
[p02] / sym_state_probs_1161[p01])
sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161
[p12] / sym_state_probs_1161[p02])
sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161
[p03] / sym_state_probs_1161[p02])
sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161
[p13] / sym_state_probs_1161[p03])
sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161
[p04] / sym_state_probs_1161[p03])
sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161
[p14] / sym_state_probs_1161[p04])
sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161
[p05] / sym_state_probs_1161[p04])
sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161
[p15] / sym_state_probs_1161[p05])
sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161
[p06] / sym_state_probs_1161[p05])
sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161
[p16] / sym_state_probs_1161[p06])
sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161
.copy())
sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p11])
sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p12])
sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p13])
sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p14])
sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p15])
sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1161[0, 0] = 1
sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(
sym_state_probs_1161[p01] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(
sym_state_probs_1161[p11] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(
sym_state_probs_1161[p02] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(
sym_state_probs_1161[p03] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(
sym_state_probs_1161[p04] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(
sym_state_probs_1161[p05] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(
sym_state_probs_1161[p06] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p00])
return (sym_state_probs_1161, sym_state_recursive_ratios_1161,
sym_state_recursive_ratios_right_1161,
sym_state_recursive_ratios_P0_1161)
def get_symbolic_state_probabilities_1171():
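    """Same as get_symbolic_state_probabilities_1151 but for
    system_capacity=7.
    """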
num_of_servers = 1
threshold = 1
system_capacity = 7
buffer_capacity = 1
Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17
) = (sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'
))
pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17])
dimension_1171 = Q_sym_1171.shape[0]
M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,
dimension_1171)])
sym_diff_equations_1171 = M_sym_1171 @ pi_1171
b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])
eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])
eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])
eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])
eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])
eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])
eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])
eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])
eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])
eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])
eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])
eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])
eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])
eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])
eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])
sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,
eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,
eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,
p07, p17))
sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1171[0, 0] = 1
sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171
[p01] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171
[p11] / sym_state_probs_1171[p01])
sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171
[p02] / sym_state_probs_1171[p01])
sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171
[p12] / sym_state_probs_1171[p02])
sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171
[p03] / sym_state_probs_1171[p02])
sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171
[p13] / sym_state_probs_1171[p03])
sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171
[p04] / sym_state_probs_1171[p03])
sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171
[p14] / sym_state_probs_1171[p04])
sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171
[p05] / sym_state_probs_1171[p04])
sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171
[p15] / sym_state_probs_1171[p05])
sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171
[p06] / sym_state_probs_1171[p05])
sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171
[p16] / sym_state_probs_1171[p06])
sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171
[p07] / sym_state_probs_1171[p06])
sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171
[p17] / sym_state_probs_1171[p07])
sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171
.copy())
sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p11])
sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p12])
sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p13])
sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p14])
sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p15])
sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p16])
sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1171[0, 0] = 1
sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(
sym_state_probs_1171[p01] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(
sym_state_probs_1171[p11] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(
sym_state_probs_1171[p02] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(
sym_state_probs_1171[p03] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(
sym_state_probs_1171[p04] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(
sym_state_probs_1171[p05] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(
sym_state_probs_1171[p06] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(
sym_state_probs_1171[p07] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p00])
return (sym_state_probs_1171, sym_state_recursive_ratios_1171,
sym_state_recursive_ratios_right_1171,
sym_state_recursive_ratios_P0_1171)
def get_symbolic_state_probabilities_1181():
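    """Same as get_symbolic_state_probabilities_1151 but for
    system_capacity=8.
    """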
num_of_servers = 1
threshold = 1
system_capacity = 8
buffer_capacity = 1
Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,
p17, p08, p18) = (sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'
))
pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17, p08, p18])
dimension_1181 = Q_sym_1181.shape[0]
M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,
dimension_1181)])
sym_diff_equations_1181 = M_sym_1181 @ pi_1181
b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])
eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])
eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])
eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])
eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])
eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])
eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])
eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])
eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])
eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])
eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])
eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])
eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])
eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])
eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])
eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])
eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])
eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])
sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,
eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,
eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,
eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,
p05, p15, p06, p16, p07, p17, p08, p18))
sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1181[0, 0] = 1
sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181
[p01] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181
[p11] / sym_state_probs_1181[p01])
sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181
[p02] / sym_state_probs_1181[p01])
sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181
[p12] / sym_state_probs_1181[p02])
sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181
[p03] / sym_state_probs_1181[p02])
sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181
[p13] / sym_state_probs_1181[p03])
sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181
[p04] / sym_state_probs_1181[p03])
sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181
[p14] / sym_state_probs_1181[p04])
sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181
[p05] / sym_state_probs_1181[p04])
sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181
[p15] / sym_state_probs_1181[p05])
sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181
[p06] / sym_state_probs_1181[p05])
sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181
[p16] / sym_state_probs_1181[p06])
sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181
[p07] / sym_state_probs_1181[p06])
sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181
[p17] / sym_state_probs_1181[p07])
sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181
[p08] / sym_state_probs_1181[p07])
sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181
[p18] / sym_state_probs_1181[p08])
sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181
.copy())
sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p11])
sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p12])
sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p13])
sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p14])
sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p15])
sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p16])
sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p17])
sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1181[0, 0] = 1
sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p00])
return (sym_state_probs_1181, sym_state_recursive_ratios_1181,
sym_state_recursive_ratios_right_1181,
sym_state_recursive_ratios_P0_1181)
def get_symbolic_state_probabilities_1191():
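    """Same as get_symbolic_state_probabilities_1151 but for
    system_capacity=9.
    """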
num_of_servers = 1
threshold = 1
system_capacity = 9
buffer_capacity = 1
Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,
p17, p08, p18, p09, p19) = (sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'
))
pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17, p08, p18, p09, p19])
dimension_1191 = Q_sym_1191.shape[0]
M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,
dimension_1191)])
sym_diff_equations_1191 = M_sym_1191 @ pi_1191
b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])
eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])
eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])
eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])
eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])
eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])
eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])
eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])
eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])
eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])
eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])
eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])
eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])
eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])
eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])
eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])
eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])
eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])
eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])
eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])
sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,
eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,
eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,
eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,
p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,
p09, p19))
sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1191[0, 0] = 1
sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191
[p01] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191
[p11] / sym_state_probs_1191[p01])
sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191
[p02] / sym_state_probs_1191[p01])
sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191
[p12] / sym_state_probs_1191[p02])
sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191
[p03] / sym_state_probs_1191[p02])
sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191
[p13] / sym_state_probs_1191[p03])
sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191
[p04] / sym_state_probs_1191[p03])
sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191
[p14] / sym_state_probs_1191[p04])
sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191
[p05] / sym_state_probs_1191[p04])
sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191
[p15] / sym_state_probs_1191[p05])
sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191
[p06] / sym_state_probs_1191[p05])
sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191
[p16] / sym_state_probs_1191[p06])
sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191
[p07] / sym_state_probs_1191[p06])
sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191
[p17] / sym_state_probs_1191[p07])
sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191
[p08] / sym_state_probs_1191[p07])
sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191
[p18] / sym_state_probs_1191[p08])
sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191
[p09] / sym_state_probs_1191[p08])
sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191
[p19] / sym_state_probs_1191[p09])
sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191
.copy())
sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p11])
sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p12])
sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p13])
sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p14])
sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p15])
sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p16])
sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p17])
    sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(
        sym_state_probs_1191[p19] / sym_state_probs_1191[p18])
sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1191[0, 0] = 1
sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(
sym_state_probs_1191[p01] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(
sym_state_probs_1191[p11] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(
sym_state_probs_1191[p02] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(
sym_state_probs_1191[p03] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(
sym_state_probs_1191[p04] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(
sym_state_probs_1191[p05] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(
sym_state_probs_1191[p06] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(
sym_state_probs_1191[p07] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(
sym_state_probs_1191[p08] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(
sym_state_probs_1191[p09] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(
sym_state_probs_1191[p19] / sym_state_probs_1191[p00])
return (sym_state_probs_1191, sym_state_recursive_ratios_1191,
sym_state_recursive_ratios_right_1191,
sym_state_recursive_ratios_P0_1191)
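

# A minimal sanity check (a sketch with illustrative rate values, not values
# taken from any particular model): for any positive rates, the normalised
# symbolic state probabilities returned above should sum to one.
if __name__ == "__main__":
    probs_1341, *_ = get_symbolic_state_probabilities_1341()
    rates = {
        sym.Symbol("Lambda"): sym.Rational(3),
        sym.Symbol("lambda_1"): sym.Rational(1),
        sym.Symbol("lambda_2"): sym.Rational(2),
        sym.Symbol("mu"): sym.Rational(4),
    }
    total = sum(prob.subs(rates) for prob in probs_1341)
    assert sym.simplify(total - 1) == 0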
<|reserved_special_token_1|>
import ambulance_game as abg
import numpy as np
import sympy as sym
from sympy.abc import a, b, c, d, e, f, g, h, i, j
def get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity
):
Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=
num_of_servers, threshold=threshold, system_capacity=
system_capacity, buffer_capacity=buffer_capacity)
dimension = Q_sym.shape[0]
if dimension > 7:
return 'Capacity of 6 exceeded'
M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])
b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
system = M_sym.col_insert(dimension, b_sym)
sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])
return sol
def get_symbolic_state_probabilities_1222():
num_of_servers = 1
threshold = 2
system_capacity = 2
buffer_capacity = 2
sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=
threshold, system_capacity=system_capacity, buffer_capacity=
buffer_capacity)
all_states_1222 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]
sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])
sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])
sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])
sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])
sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])
sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1222[0, 0] = 1
sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222
[1] / sym_state_probs_1222[0])
sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222
[2] / sym_state_probs_1222[1])
sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222
[3] / sym_state_probs_1222[2])
sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222
[4] / sym_state_probs_1222[3])
return sym_state_probs_1222, sym_state_recursive_ratios_1222
def get_symbolic_state_probabilities_1121():
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 1
all_states_1121 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_pi_1121 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=
threshold, system_capacity=system_capacity, buffer_capacity=
buffer_capacity)
sym_state_probs_1121 = [(0) for _ in range(len(all_states_1121))]
sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])
sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])
sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])
sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])
sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])
sym_state_recursive_ratios_1121 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1121[0, 0] = 1
sym_state_recursive_ratios_1121[0, 1] = sym.factor(sym_state_probs_1121
[1] / sym_state_probs_1121[0])
sym_state_recursive_ratios_1121[1, 1] = sym.factor(sym_state_probs_1121
[2] / sym_state_probs_1121[1])
sym_state_recursive_ratios_1121[0, 2] = sym.factor(sym_state_probs_1121
[3] / sym_state_probs_1121[1])
sym_state_recursive_ratios_1121[1, 2] = sym.factor(sym_state_probs_1121
[4] / sym_state_probs_1121[3])
sym_state_recursive_ratios_right_1121 = (sym_state_recursive_ratios_1121
.copy())
sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[2])
sym_state_recursive_ratios_P0_1121 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1121[0, 0] = 1
sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(
sym_state_probs_1121[1] / sym_state_probs_1121[0])
sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(
sym_state_probs_1121[2] / sym_state_probs_1121[0])
sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(
sym_state_probs_1121[3] / sym_state_probs_1121[0])
sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[0])
return (sym_state_probs_1121, sym_state_recursive_ratios_1121,
sym_state_recursive_ratios_right_1121,
sym_state_recursive_ratios_P0_1121)
def get_symbolic_state_probabilities_1122():
threshold = 1
system_capacity = 2
buffer_capacity = 2
all_states_1122 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +
sym_lambda_2 ** 2 * sym_mu ** 4)
sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *
sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)
sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (
sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *
sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)
sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (
sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *
sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu +
sym_lambda_1 ** 2)
sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (
sym_lambda_2 + sym_mu)
sym_state_probs_1122[5
] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (2 *
sym_mu + sym_lambda_1 + sym_lambda_2)
sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (
sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *
sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *
sym_lambda_2 * sym_mu)
total_1122 = np.sum(sym_state_probs_1122)
sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]
sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1122[0, 0] = 1
sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122
[1] / sym_state_probs_1122[0])
sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122
[2] / sym_state_probs_1122[1])
sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122
[3] / sym_state_probs_1122[2])
sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122
[4] / sym_state_probs_1122[1])
sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122
[5] / sym_state_probs_1122[4])
sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122
[6] / sym_state_probs_1122[5])
sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122
.copy())
sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[2])
sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[3])
sym_state_recursive_ratios_P0_1122 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1122[0, 0] = 1
sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(
sym_state_probs_1122[1] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(
sym_state_probs_1122[2] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(
sym_state_probs_1122[3] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(
sym_state_probs_1122[4] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[0])
sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[0])
return (sym_state_probs_1122, sym_state_recursive_ratios_1122,
sym_state_recursive_ratios_right_1122,
sym_state_recursive_ratios_P0_1122)
def get_symbolic_state_probabilities_1123():
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 3
Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(
'p00, p01, p11, p21, p31, p02, p12, p22, p32')
pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])
dimension_1123 = Q_sym_1123.shape[0]
M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,
dimension_1123)])
sym_diff_equations_1123 = M_sym_1123 @ pi_1123
b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])
eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])
eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])
eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])
eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])
eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])
eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])
eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])
eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])
eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])
sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,
eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,
p01, p11, p21, p31, p02, p12, p22, p32))
sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1123[0, 0] = 1
sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123
[p01] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123
[p11] / sym_state_probs_1123[p01])
sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123
[p21] / sym_state_probs_1123[p11])
sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123
[p31] / sym_state_probs_1123[p21])
sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123
[p02] / sym_state_probs_1123[p01])
sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123
[p12] / sym_state_probs_1123[p02])
sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123
[p22] / sym_state_probs_1123[p12])
sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123
[p32] / sym_state_probs_1123[p22])
sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123
.copy())
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p11])
sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p21])
sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(
sym_state_probs_1123[p32] / sym_state_probs_1123[p22])
sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1123[0, 0] = 1
sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(
sym_state_probs_1123[p01] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(
sym_state_probs_1123[p11] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(
sym_state_probs_1123[p21] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(
sym_state_probs_1123[p31] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(
sym_state_probs_1123[p02] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p00])
sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(
sym_state_probs_1123[p32] / sym_state_probs_1123[p00])
return (sym_state_probs_1123, sym_state_recursive_ratios_1123,
sym_state_recursive_ratios_right_1123,
sym_state_recursive_ratios_P0_1123)
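# Editor's note: each function in this module builds up to three ratio
# matrices. Entry [u, v] of sym_state_recursive_ratios_* holds the ratio of
# state (u, v)'s probability to that of its predecessor when stepping down a
# column, (u - 1, v) -> (u, v), or along the top row, (0, v - 1) -> (0, v).
# The *_right variants instead step rightwards within a row,
# (u, v - 1) -> (u, v), and the *_P0 variants express every state
# probability relative to the empty state (0, 0).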
def get_symbolic_state_probabilities_1341():
threshold = 3
system_capacity = 4
buffer_capacity = 1
all_states_1341 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6
sym_state_probs_1341[1] = (sym_Lambda * sym_lambda_2 * sym_mu ** 4 +
sym_Lambda * sym_mu ** 5)
sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +
sym_Lambda ** 2 * sym_mu ** 4)
sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +
sym_Lambda ** 3 * sym_mu ** 3)
sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *
sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu **
2 + sym_Lambda ** 3 * sym_lambda_2 * sym_lambda_2 * sym_mu)
sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2
sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *
sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +
2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)
total_1341 = np.sum(sym_state_probs_1341)
sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]
sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1341[0, 0] = 1
sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341
[1] / sym_state_probs_1341[0])
sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341
[2] / sym_state_probs_1341[1])
sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341
[3] / sym_state_probs_1341[2])
sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341
[5] / sym_state_probs_1341[3])
sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341
[4] / sym_state_probs_1341[3])
sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341
[6] / sym_state_probs_1341[5])
sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341
.copy())
sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[4])
sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1341[0, 0] = 1
sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(
sym_state_probs_1341[1] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(
sym_state_probs_1341[2] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(
sym_state_probs_1341[3] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(
sym_state_probs_1341[4] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(
sym_state_probs_1341[5] / sym_state_probs_1341[0])
sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[0])
return (sym_state_probs_1341, sym_state_recursive_ratios_1341,
sym_state_recursive_ratios_right_1341,
sym_state_recursive_ratios_P0_1341)
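# Editor's sketch (commented out because the symbols are local to the
# function above): hand-derived expressions like sym_state_probs_1341 can be
# sanity-checked by substituting numeric rates and confirming normalisation:
#
#     rates = {sym_Lambda: 1, sym_lambda_1: 2, sym_lambda_2: 3, sym_mu: 4}
#     numeric = [float(p.subs(rates)) for p in sym_state_probs_1341]
#     assert abs(sum(numeric) - 1) < 1e-12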
def get_symbolic_state_probabilities_1131():
threshold = 1
system_capacity = 3
buffer_capacity = 1
all_states_1131 = abg.markov.build_states(threshold=threshold,
system_capacity=system_capacity, buffer_capacity=buffer_capacity)
sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]
sym_Lambda = sym.symbols('Lambda')
sym_lambda_1 = sym.symbols('lambda_1')
sym_lambda_2 = sym.symbols('lambda_2')
sym_mu = sym.symbols('mu')
sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu **
5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *
sym_mu ** 4)
sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu
sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *
sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *
sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 *
sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **
2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 +
sym_Lambda * sym_lambda_2 * sym_mu ** 4)
sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (
sym_lambda_2 + sym_mu)
sym_state_probs_1131[4] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (
sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *
sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *
sym_mu + 2 * sym_mu ** 2)
sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3
sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (
sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *
sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *
sym_mu + 3 * sym_mu ** 2)
denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 +
sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *
sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 **
2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 *
sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *
sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 *
sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *
sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *
sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **
2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 +
sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 +
sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 *
sym_mu ** 5 + sym_mu ** 6)
sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]
sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1131[0, 0] = 1
sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131
[1] / sym_state_probs_1131[0])
sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131
[2] / sym_state_probs_1131[1])
sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131
[3] / sym_state_probs_1131[1])
sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131
[4] / sym_state_probs_1131[3])
sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131
[5] / sym_state_probs_1131[3])
sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131
[6] / sym_state_probs_1131[5])
sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131
.copy())
sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[2])
sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[4])
sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1131[0, 0] = 1
sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(
sym_state_probs_1131[1] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(
sym_state_probs_1131[2] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(
sym_state_probs_1131[3] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(
sym_state_probs_1131[5] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[0])
return (sym_state_probs_1131, sym_state_recursive_ratios_1131,
sym_state_recursive_ratios_right_1131,
sym_state_recursive_ratios_P0_1131)
def get_symbolic_state_probabilities_1132():
num_of_servers = 1
threshold = 1
system_capacity = 3
buffer_capacity = 2
Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(
'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')
pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])
dimension_1132 = Q_sym_1132.shape[0]
M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,
dimension_1132)])
sym_diff_equations_1132 = M_sym_1132 @ pi_1132
b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])
eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])
eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])
eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])
eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])
eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])
eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])
eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])
eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])
eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])
sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,
eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,
eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))
sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1132[0, 0] = 1
sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132
[p01] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132
[p11] / sym_state_probs_1132[p01])
sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132
[p21] / sym_state_probs_1132[p11])
sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132
[p02] / sym_state_probs_1132[p01])
sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132
[p12] / sym_state_probs_1132[p02])
sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132
[p22] / sym_state_probs_1132[p12])
sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132
[p03] / sym_state_probs_1132[p02])
sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132
[p13] / sym_state_probs_1132[p03])
sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132
[p23] / sym_state_probs_1132[p13])
sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132
.copy())
sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p11])
sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p12])
sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p21])
sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p22])
sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1132[0, 0] = 1
sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(
sym_state_probs_1132[p01] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(
sym_state_probs_1132[p11] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(
sym_state_probs_1132[p21] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(
sym_state_probs_1132[p02] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(
sym_state_probs_1132[p03] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p00])
sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p00])
return (sym_state_probs_1132, sym_state_recursive_ratios_1132,
sym_state_recursive_ratios_right_1132,
sym_state_recursive_ratios_P0_1132)
def get_symbolic_state_probabilities_1141():
num_of_servers = 1
threshold = 1
system_capacity = 4
buffer_capacity = 1
Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14')
pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])
dimension_1141 = Q_sym_1141.shape[0]
M_sym_1141 = sym.Matrix([Q_sym_1141.transpose()[:-1, :], sym.ones(1,
dimension_1141)])
sym_diff_equations_1141 = M_sym_1141 @ pi_1141
b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])
eq0_1141 = sym.Eq(sym_diff_equations_1141[0], b_sym_1141[0])
eq1_1141 = sym.Eq(sym_diff_equations_1141[1], b_sym_1141[1])
eq2_1141 = sym.Eq(sym_diff_equations_1141[2], b_sym_1141[2])
eq3_1141 = sym.Eq(sym_diff_equations_1141[3], b_sym_1141[3])
eq4_1141 = sym.Eq(sym_diff_equations_1141[4], b_sym_1141[4])
eq5_1141 = sym.Eq(sym_diff_equations_1141[5], b_sym_1141[5])
eq6_1141 = sym.Eq(sym_diff_equations_1141[6], b_sym_1141[6])
eq7_1141 = sym.Eq(sym_diff_equations_1141[7], b_sym_1141[7])
eq8_1141 = sym.Eq(sym_diff_equations_1141[8], b_sym_1141[8])
sym_state_probs_1141 = sym.solve([eq0_1141, eq1_1141, eq2_1141,
eq3_1141, eq4_1141, eq5_1141, eq6_1141, eq7_1141, eq8_1141], (p00,
p01, p11, p02, p12, p03, p13, p04, p14))
sym_state_recursive_ratios_1141 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1141[0, 0] = 1
sym_state_recursive_ratios_1141[0, 1] = sym.factor(sym_state_probs_1141
[p01] / sym_state_probs_1141[p00])
sym_state_recursive_ratios_1141[1, 1] = sym.factor(sym_state_probs_1141
[p11] / sym_state_probs_1141[p01])
sym_state_recursive_ratios_1141[0, 2] = sym.factor(sym_state_probs_1141
[p02] / sym_state_probs_1141[p01])
sym_state_recursive_ratios_1141[1, 2] = sym.factor(sym_state_probs_1141
[p12] / sym_state_probs_1141[p02])
sym_state_recursive_ratios_1141[0, 3] = sym.factor(sym_state_probs_1141
[p03] / sym_state_probs_1141[p02])
sym_state_recursive_ratios_1141[1, 3] = sym.factor(sym_state_probs_1141
[p13] / sym_state_probs_1141[p03])
sym_state_recursive_ratios_1141[0, 4] = sym.factor(sym_state_probs_1141
[p04] / sym_state_probs_1141[p03])
sym_state_recursive_ratios_1141[1, 4] = sym.factor(sym_state_probs_1141
[p14] / sym_state_probs_1141[p04])
sym_state_recursive_ratios_right_1141 = (sym_state_recursive_ratios_1141
.copy())
sym_state_recursive_ratios_right_1141[1, 2] = sym.factor(
sym_state_probs_1141[p12] / sym_state_probs_1141[p11])
sym_state_recursive_ratios_right_1141[1, 3] = sym.factor(
sym_state_probs_1141[p13] / sym_state_probs_1141[p12])
sym_state_recursive_ratios_right_1141[1, 4] = sym.factor(
sym_state_probs_1141[p14] / sym_state_probs_1141[p13])
sym_state_recursive_ratios_P0_1141 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1141[0, 0] = 1
sym_state_recursive_ratios_P0_1141[0, 1] = sym.factor(
sym_state_probs_1141[p01] / sym_state_probs_1141[p00])
sym_state_recursive_ratios_P0_1141[1, 1] = sym.factor(
sym_state_probs_1141[p11] / sym_state_probs_1141[p00])
sym_state_recursive_ratios_P0_1141[0, 2] = sym.factor(
sym_state_probs_1141[p02] / sym_state_probs_1141[p00])
sym_state_recursive_ratios_P0_1141[1, 2] = sym.factor(
sym_state_probs_1141[p12] / sym_state_probs_1141[p00])
sym_state_recursive_ratios_P0_1141[0, 3] = sym.factor(
sym_state_probs_1141[p03] / sym_state_probs_1141[p00])
sym_state_recursive_ratios_P0_1141[1, 3] = sym.factor(
sym_state_probs_1141[p13] / sym_state_probs_1141[p00])
sym_state_recursive_ratios_P0_1141[0, 4] = sym.factor(
sym_state_probs_1141[p04] / sym_state_probs_1141[p00])
sym_state_recursive_ratios_P0_1141[1, 4] = sym.factor(
sym_state_probs_1141[p14] / sym_state_probs_1141[p00])
return (sym_state_probs_1141, sym_state_recursive_ratios_1141,
sym_state_recursive_ratios_right_1141,
sym_state_recursive_ratios_P0_1141)
def get_symbolic_state_probabilities_1142():
num_of_servers = 1
threshold = 1
system_capacity = 4
buffer_capacity = 2
Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(num_of_servers=
num_of_servers, threshold=threshold, system_capacity=
system_capacity, buffer_capacity=buffer_capacity)
p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = sym.symbols(
'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24')
pi_1142 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23,
p04, p14, p24])
dimension_1142 = Q_sym_1142.shape[0]
M_sym_1142 = sym.Matrix([Q_sym_1142.transpose()[:-1, :], sym.ones(1,
dimension_1142)])
sym_diff_equations_1142 = M_sym_1142 @ pi_1142
b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])
eq0_1142 = sym.Eq(sym_diff_equations_1142[0], b_sym_1142[0])
eq1_1142 = sym.Eq(sym_diff_equations_1142[1], b_sym_1142[1])
eq2_1142 = sym.Eq(sym_diff_equations_1142[2], b_sym_1142[2])
eq3_1142 = sym.Eq(sym_diff_equations_1142[3], b_sym_1142[3])
eq4_1142 = sym.Eq(sym_diff_equations_1142[4], b_sym_1142[4])
eq5_1142 = sym.Eq(sym_diff_equations_1142[5], b_sym_1142[5])
eq6_1142 = sym.Eq(sym_diff_equations_1142[6], b_sym_1142[6])
eq7_1142 = sym.Eq(sym_diff_equations_1142[7], b_sym_1142[7])
eq8_1142 = sym.Eq(sym_diff_equations_1142[8], b_sym_1142[8])
eq9_1142 = sym.Eq(sym_diff_equations_1142[9], b_sym_1142[9])
eq10_1142 = sym.Eq(sym_diff_equations_1142[10], b_sym_1142[10])
eq11_1142 = sym.Eq(sym_diff_equations_1142[11], b_sym_1142[11])
eq12_1142 = sym.Eq(sym_diff_equations_1142[12], b_sym_1142[12])
sym_state_probs_1142 = sym.solve([eq0_1142, eq1_1142, eq2_1142,
eq3_1142, eq4_1142, eq5_1142, eq6_1142, eq7_1142, eq8_1142,
eq9_1142, eq10_1142, eq11_1142, eq12_1142], (p00, p01, p11, p21,
p02, p12, p22, p03, p13, p23, p04, p14, p24))
sym_state_recursive_ratios_1142 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1142[0, 0] = 1
sym_state_recursive_ratios_1142[0, 1] = sym.factor(sym_state_probs_1142
[p01] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_1142[1, 1] = sym.factor(sym_state_probs_1142
[p11] / sym_state_probs_1142[p01])
sym_state_recursive_ratios_1142[2, 1] = sym.factor(sym_state_probs_1142
[p21] / sym_state_probs_1142[p11])
sym_state_recursive_ratios_1142[0, 2] = sym.factor(sym_state_probs_1142
[p02] / sym_state_probs_1142[p01])
sym_state_recursive_ratios_1142[1, 2] = sym.factor(sym_state_probs_1142
[p12] / sym_state_probs_1142[p02])
sym_state_recursive_ratios_1142[2, 2] = sym.factor(sym_state_probs_1142
[p22] / sym_state_probs_1142[p12])
sym_state_recursive_ratios_1142[0, 3] = sym.factor(sym_state_probs_1142
[p03] / sym_state_probs_1142[p02])
sym_state_recursive_ratios_1142[1, 3] = sym.factor(sym_state_probs_1142
[p13] / sym_state_probs_1142[p03])
sym_state_recursive_ratios_1142[2, 3] = sym.factor(sym_state_probs_1142
[p23] / sym_state_probs_1142[p13])
sym_state_recursive_ratios_1142[0, 4] = sym.factor(sym_state_probs_1142
[p04] / sym_state_probs_1142[p03])
sym_state_recursive_ratios_1142[1, 4] = sym.factor(sym_state_probs_1142
[p14] / sym_state_probs_1142[p04])
sym_state_recursive_ratios_1142[2, 4] = sym.factor(sym_state_probs_1142
[p24] / sym_state_probs_1142[p14])
sym_state_recursive_ratios_right_1142 = (sym_state_recursive_ratios_1142
.copy())
sym_state_recursive_ratios_right_1142[1, 2] = sym.factor(
sym_state_probs_1142[p12] / sym_state_probs_1142[p11])
sym_state_recursive_ratios_right_1142[1, 3] = sym.factor(
sym_state_probs_1142[p13] / sym_state_probs_1142[p12])
sym_state_recursive_ratios_right_1142[1, 4] = sym.factor(
sym_state_probs_1142[p14] / sym_state_probs_1142[p13])
sym_state_recursive_ratios_right_1142[2, 2] = sym.factor(
sym_state_probs_1142[p22] / sym_state_probs_1142[p21])
sym_state_recursive_ratios_right_1142[2, 3] = sym.factor(
sym_state_probs_1142[p23] / sym_state_probs_1142[p22])
sym_state_recursive_ratios_right_1142[2, 4] = sym.factor(
sym_state_probs_1142[p24] / sym_state_probs_1142[p23])
sym_state_recursive_ratios_P0_1142 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1142[0, 0] = 1
sym_state_recursive_ratios_P0_1142[0, 1] = sym.factor(
sym_state_probs_1142[p01] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[1, 1] = sym.factor(
sym_state_probs_1142[p11] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[2, 1] = sym.factor(
sym_state_probs_1142[p21] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[0, 2] = sym.factor(
sym_state_probs_1142[p02] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[1, 2] = sym.factor(
sym_state_probs_1142[p12] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[2, 2] = sym.factor(
sym_state_probs_1142[p22] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[0, 3] = sym.factor(
sym_state_probs_1142[p03] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[1, 3] = sym.factor(
sym_state_probs_1142[p13] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[2, 3] = sym.factor(
sym_state_probs_1142[p23] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[0, 4] = sym.factor(
sym_state_probs_1142[p04] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[1, 4] = sym.factor(
sym_state_probs_1142[p14] / sym_state_probs_1142[p00])
sym_state_recursive_ratios_P0_1142[2, 4] = sym.factor(
sym_state_probs_1142[p24] / sym_state_probs_1142[p00])
return (sym_state_probs_1142, sym_state_recursive_ratios_1142,
sym_state_recursive_ratios_right_1142,
sym_state_recursive_ratios_P0_1142)
def get_symbolic_state_probabilities_1151():
num_of_servers = 1
threshold = 1
system_capacity = 5
buffer_capacity = 1
Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')
pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15])
dimension_1151 = Q_sym_1151.shape[0]
M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,
dimension_1151)])
sym_diff_equations_1151 = M_sym_1151 @ pi_1151
b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])
eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])
eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])
eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])
eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])
eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])
eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])
eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])
eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])
eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])
eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])
sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,
eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,
eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,
p05, p15))
sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1151[0, 0] = 1
sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151
[p01] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151
[p11] / sym_state_probs_1151[p01])
sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151
[p02] / sym_state_probs_1151[p01])
sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151
[p12] / sym_state_probs_1151[p02])
sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151
[p03] / sym_state_probs_1151[p02])
sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151
[p13] / sym_state_probs_1151[p03])
sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151
[p04] / sym_state_probs_1151[p03])
sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151
[p14] / sym_state_probs_1151[p04])
sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151
[p05] / sym_state_probs_1151[p04])
sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151
[p15] / sym_state_probs_1151[p05])
sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151
.copy())
sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p11])
sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p12])
sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p13])
sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p14])
sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1151[0, 0] = 1
sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(
sym_state_probs_1151[p01] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(
sym_state_probs_1151[p11] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(
sym_state_probs_1151[p02] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(
sym_state_probs_1151[p03] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(
sym_state_probs_1151[p04] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(
sym_state_probs_1151[p05] / sym_state_probs_1151[p00])
sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p00])
return (sym_state_probs_1151, sym_state_recursive_ratios_1151,
sym_state_recursive_ratios_right_1151,
sym_state_recursive_ratios_P0_1151)
def get_symbolic_state_probabilities_1161():
num_of_servers = 1
threshold = 1
system_capacity = 6
buffer_capacity = 1
Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16')
pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16])
dimension_1161 = Q_sym_1161.shape[0]
M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,
dimension_1161)])
sym_diff_equations_1161 = M_sym_1161 @ pi_1161
b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])
eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])
eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])
eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])
eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])
eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])
eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])
eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])
eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])
eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])
eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])
eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])
eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])
sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,
eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,
eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,
p12, p03, p13, p04, p14, p05, p15, p06, p16))
sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1161[0, 0] = 1
sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161
[p01] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161
[p11] / sym_state_probs_1161[p01])
sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161
[p02] / sym_state_probs_1161[p01])
sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161
[p12] / sym_state_probs_1161[p02])
sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161
[p03] / sym_state_probs_1161[p02])
sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161
[p13] / sym_state_probs_1161[p03])
sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161
[p04] / sym_state_probs_1161[p03])
sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161
[p14] / sym_state_probs_1161[p04])
sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161
[p05] / sym_state_probs_1161[p04])
sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161
[p15] / sym_state_probs_1161[p05])
sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161
[p06] / sym_state_probs_1161[p05])
sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161
[p16] / sym_state_probs_1161[p06])
sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161
.copy())
sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p11])
sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p12])
sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p13])
sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p14])
sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p15])
sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1161[0, 0] = 1
sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(
sym_state_probs_1161[p01] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(
sym_state_probs_1161[p11] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(
sym_state_probs_1161[p02] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(
sym_state_probs_1161[p03] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(
sym_state_probs_1161[p04] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(
sym_state_probs_1161[p05] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(
sym_state_probs_1161[p06] / sym_state_probs_1161[p00])
sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p00])
return (sym_state_probs_1161, sym_state_recursive_ratios_1161,
sym_state_recursive_ratios_right_1161,
sym_state_recursive_ratios_P0_1161)
def get_symbolic_state_probabilities_1171():
num_of_servers = 1
threshold = 1
system_capacity = 7
buffer_capacity = 1
Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17')
pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17])
dimension_1171 = Q_sym_1171.shape[0]
M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,
dimension_1171)])
sym_diff_equations_1171 = M_sym_1171 @ pi_1171
b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])
eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])
eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])
eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])
eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])
eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])
eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])
eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])
eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])
eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])
eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])
eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])
eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])
eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])
eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])
sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,
eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,
eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,
p07, p17))
sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1171[0, 0] = 1
sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171
[p01] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171
[p11] / sym_state_probs_1171[p01])
sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171
[p02] / sym_state_probs_1171[p01])
sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171
[p12] / sym_state_probs_1171[p02])
sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171
[p03] / sym_state_probs_1171[p02])
sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171
[p13] / sym_state_probs_1171[p03])
sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171
[p04] / sym_state_probs_1171[p03])
sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171
[p14] / sym_state_probs_1171[p04])
sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171
[p05] / sym_state_probs_1171[p04])
sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171
[p15] / sym_state_probs_1171[p05])
sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171
[p06] / sym_state_probs_1171[p05])
sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171
[p16] / sym_state_probs_1171[p06])
sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171
[p07] / sym_state_probs_1171[p06])
sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171
[p17] / sym_state_probs_1171[p07])
sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171
.copy())
sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p11])
sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p12])
sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p13])
sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p14])
sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p15])
sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p16])
sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1171[0, 0] = 1
sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(
sym_state_probs_1171[p01] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(
sym_state_probs_1171[p11] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(
sym_state_probs_1171[p02] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(
sym_state_probs_1171[p03] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(
sym_state_probs_1171[p04] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(
sym_state_probs_1171[p05] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(
sym_state_probs_1171[p06] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(
sym_state_probs_1171[p07] / sym_state_probs_1171[p00])
sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p00])
return (sym_state_probs_1171, sym_state_recursive_ratios_1171,
sym_state_recursive_ratios_right_1171,
sym_state_recursive_ratios_P0_1171)
def get_symbolic_state_probabilities_1181():
num_of_servers = 1
threshold = 1
system_capacity = 8
buffer_capacity = 1
Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18')
pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17, p08, p18])
dimension_1181 = Q_sym_1181.shape[0]
M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,
dimension_1181)])
sym_diff_equations_1181 = M_sym_1181 @ pi_1181
b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])
eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])
eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])
eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])
eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])
eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])
eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])
eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])
eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])
eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])
eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])
eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])
eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])
eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])
eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])
eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])
eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])
eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])
sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,
eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,
eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,
eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,
p05, p15, p06, p16, p07, p17, p08, p18))
sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1181[0, 0] = 1
sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181
[p01] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181
[p11] / sym_state_probs_1181[p01])
sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181
[p02] / sym_state_probs_1181[p01])
sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181
[p12] / sym_state_probs_1181[p02])
sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181
[p03] / sym_state_probs_1181[p02])
sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181
[p13] / sym_state_probs_1181[p03])
sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181
[p04] / sym_state_probs_1181[p03])
sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181
[p14] / sym_state_probs_1181[p04])
sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181
[p05] / sym_state_probs_1181[p04])
sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181
[p15] / sym_state_probs_1181[p05])
sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181
[p06] / sym_state_probs_1181[p05])
sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181
[p16] / sym_state_probs_1181[p06])
sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181
[p07] / sym_state_probs_1181[p06])
sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181
[p17] / sym_state_probs_1181[p07])
sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181
[p08] / sym_state_probs_1181[p07])
sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181
[p18] / sym_state_probs_1181[p08])
sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181
.copy())
sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p11])
sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p12])
sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p13])
sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p14])
sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p15])
sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p16])
sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p17])
sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1181[0, 0] = 1
sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p00])
sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p00])
return (sym_state_probs_1181, sym_state_recursive_ratios_1181,
sym_state_recursive_ratios_right_1181,
sym_state_recursive_ratios_P0_1181)
def get_symbolic_state_probabilities_1191():
num_of_servers = 1
threshold = 1
system_capacity = 9
buffer_capacity = 1
Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
threshold, system_capacity, buffer_capacity)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19 = sym.symbols(
'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19')
pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
p15, p06, p16, p07, p17, p08, p18, p09, p19])
dimension_1191 = Q_sym_1191.shape[0]
M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,
dimension_1191)])
sym_diff_equations_1191 = M_sym_1191 @ pi_1191
b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])
eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])
eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])
eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])
eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])
eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])
eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])
eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])
eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])
eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])
eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])
eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])
eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])
eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])
eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])
eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])
eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])
eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])
eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])
eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])
sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,
eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,
eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,
eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,
p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,
p09, p19))
sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_1191[0, 0] = 1
sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191
[p01] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191
[p11] / sym_state_probs_1191[p01])
sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191
[p02] / sym_state_probs_1191[p01])
sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191
[p12] / sym_state_probs_1191[p02])
sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191
[p03] / sym_state_probs_1191[p02])
sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191
[p13] / sym_state_probs_1191[p03])
sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191
[p04] / sym_state_probs_1191[p03])
sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191
[p14] / sym_state_probs_1191[p04])
sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191
[p05] / sym_state_probs_1191[p04])
sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191
[p15] / sym_state_probs_1191[p05])
sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191
[p06] / sym_state_probs_1191[p05])
sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191
[p16] / sym_state_probs_1191[p06])
sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191
[p07] / sym_state_probs_1191[p06])
sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191
[p17] / sym_state_probs_1191[p07])
sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191
[p08] / sym_state_probs_1191[p07])
sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191
[p18] / sym_state_probs_1191[p08])
sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191
[p09] / sym_state_probs_1191[p08])
sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191
[p19] / sym_state_probs_1191[p09])
sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191
.copy())
sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p11])
sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p12])
sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p13])
sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p14])
sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p15])
sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p16])
sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p17])
sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(
sym_state_probs_1191[p19] / sym_state_probs_1191[p18])
sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1,
system_capacity + 1)
sym_state_recursive_ratios_P0_1191[0, 0] = 1
sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(
sym_state_probs_1191[p01] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(
sym_state_probs_1191[p11] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(
sym_state_probs_1191[p02] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(
sym_state_probs_1191[p03] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(
sym_state_probs_1191[p04] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(
sym_state_probs_1191[p05] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(
sym_state_probs_1191[p06] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(
sym_state_probs_1191[p07] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(
sym_state_probs_1191[p08] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(
sym_state_probs_1191[p09] / sym_state_probs_1191[p00])
sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(
sym_state_probs_1191[p19] / sym_state_probs_1191[p00])
return (sym_state_probs_1191, sym_state_recursive_ratios_1191,
sym_state_recursive_ratios_right_1191,
sym_state_recursive_ratios_P0_1191)
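# Editor's sketch (an assumption-laden illustration, not part of the
# original module): the 11N1 blocks above repeat the same construction for
# system capacities N = 3, ..., 9 and could be generated for any N. This
# assumes the same abg.markov / sympy API and top-level imports used
# throughout this module.
def _sketch_solve_11N1(system_capacity):
    Q = abg.markov.get_symbolic_transition_matrix(
        num_of_servers=1,
        threshold=1,
        system_capacity=system_capacity,
        buffer_capacity=1,
    )
    dimension = Q.shape[0]
    pi = sym.Matrix(sym.symbols(f"p_0:{dimension}"))
    # Replace one (redundant) balance equation of pi Q = 0 with the
    # normalisation condition sum(pi) == 1, then solve the square system.
    M = sym.Matrix([Q.transpose()[:-1, :], sym.ones(1, dimension)])
    b = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
    return sym.solve(list(M @ pi - b), list(pi))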
import ambulance_game as abg
import numpy as np
import sympy as sym
from sympy.abc import a, b, c, d, e, f, g, h, i, j
def get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity):
Q_sym = abg.markov.get_symbolic_transition_matrix(
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
dimension = Q_sym.shape[0]
if dimension > 7:
return "Capacity of 6 exceeded"
M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])
b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
system = M_sym.col_insert(dimension, b_sym)
sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])
return sol
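# Editor's note: the solution is a dict keyed by the placeholder symbols
# a, b, c, ... from sympy.abc, one per state in the order produced by
# abg.markov.build_states; e.g. (a sketch):
#
#     sol = get_symbolic_pi(
#         num_of_servers=1, threshold=1, system_capacity=2, buffer_capacity=1
#     )
#     sol[a]  # symbolic steady-state probability of state (0, 0)
#
# The hard-coded symbol list [a, ..., g] is what restricts this helper to
# state spaces of dimension at most 7.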
def get_symbolic_state_probabilities_1222():
num_of_servers = 1
threshold = 2
system_capacity = 2
buffer_capacity = 2
sym_pi_1222 = get_symbolic_pi(
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
all_states_1222 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1222 = [0 for _ in range(len(all_states_1222))]
sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a]) # (0,0)
sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b]) # (0,1)
sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])  # (0,2)
sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])  # (1,2)
sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])  # (2,2)
sym_state_recursive_ratios_1222 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1222[0, 0] = 1
sym_state_recursive_ratios_1222[0, 1] = sym.factor(
sym_state_probs_1222[1] / sym_state_probs_1222[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1222[0, 2] = sym.factor(
sym_state_probs_1222[2] / sym_state_probs_1222[1]
)  # (0,1) -> (0,2)
sym_state_recursive_ratios_1222[1, 2] = sym.factor(
sym_state_probs_1222[3] / sym_state_probs_1222[2]
)  # (0,2) -> (1,2)
sym_state_recursive_ratios_1222[2, 2] = sym.factor(
sym_state_probs_1222[4] / sym_state_probs_1222[3]
)  # (1,2) -> (2,2)
return sym_state_probs_1222, sym_state_recursive_ratios_1222
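# Editor's note: with threshold == system_capacity == 2 the state space is
# (0,0), (0,1), (0,2), (1,2), (2,2), i.e. a single column above the
# threshold, so the *_right and *_P0 ratio variants built for the other
# models are presumably omitted because they would coincide with (or be
# trivially derived from) sym_state_recursive_ratios_1222.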
def get_symbolic_state_probabilities_1121():
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 1
all_states_1121 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_pi_1121 = get_symbolic_pi(
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1121 = [0 for _ in range(len(all_states_1121))]
sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a]) # (0,0)
sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b]) # (0,1)
sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c]) # (1,1)
sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d]) # (0,2)
sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e]) # (1,2)
sym_state_recursive_ratios_1121 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1121[0, 0] = 1
sym_state_recursive_ratios_1121[0, 1] = sym.factor(
sym_state_probs_1121[1] / sym_state_probs_1121[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1121[1, 1] = sym.factor(
sym_state_probs_1121[2] / sym_state_probs_1121[1]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1121[0, 2] = sym.factor(
sym_state_probs_1121[3] / sym_state_probs_1121[1]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[3]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_right_1121 = sym_state_recursive_ratios_1121.copy()
sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[2]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_P0_1121 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1121[0, 0] = 1
sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(
sym_state_probs_1121[1] / sym_state_probs_1121[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(
sym_state_probs_1121[2] / sym_state_probs_1121[0]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(
sym_state_probs_1121[3] / sym_state_probs_1121[0]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(
sym_state_probs_1121[4] / sym_state_probs_1121[0]
) # (0,0) -> (1,2)
return (
sym_state_probs_1121,
sym_state_recursive_ratios_1121,
sym_state_recursive_ratios_right_1121,
sym_state_recursive_ratios_P0_1121,
)
def get_symbolic_state_probabilities_1122():
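    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 2 and buffer capacity 2.

    The unnormalised probabilities are hard-coded closed-form expressions
    in Lambda, lambda_1, lambda_2 and mu, normalised by their sum. Returns
    the probabilities together with the adjacent-state, rightward and
    relative-to-(0,0) ratio matrices.
    """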
# num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 2
all_states_1122 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1122 = [0 for _ in range(len(all_states_1122))]
sym_Lambda = sym.symbols("Lambda")
sym_lambda_1 = sym.symbols("lambda_1")
sym_lambda_2 = sym.symbols("lambda_2")
sym_mu = sym.symbols("mu")
sym_state_probs_1122[0] = (
(sym_mu**6)
+ 2 * (sym_lambda_2) * (sym_mu**5)
+ (sym_lambda_2**2) * (sym_mu**4)
) # (0,0)
sym_state_probs_1122[1] = (sym_Lambda * sym_mu**3) * (
sym_mu**2 + 2 * sym_mu * sym_lambda_2 + sym_lambda_2**2
) # (0,1)
sym_state_probs_1122[2] = (sym_Lambda * sym_lambda_2 * sym_mu**2) * (
sym_lambda_2**2
+ sym_lambda_2 * sym_lambda_1
+ sym_lambda_1 * sym_mu
+ sym_mu**2
+ 2 * sym_lambda_2 * sym_mu
) # (1,1)
sym_state_probs_1122[3] = (sym_Lambda * sym_lambda_2**2 * sym_mu) * (
sym_lambda_2**2
+ 2 * sym_lambda_1 * sym_lambda_2
+ 3 * sym_lambda_1 * sym_mu
+ sym_mu**2
+ 2 * sym_lambda_2 * sym_mu
+ sym_lambda_1**2
) # (2,1)
sym_state_probs_1122[4] = (sym_Lambda * sym_lambda_1 * sym_mu**3) * (
sym_lambda_2 + sym_mu
) # (0,2)
sym_state_probs_1122[5] = (
sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu**2
) * (
2 * sym_mu + sym_lambda_1 + sym_lambda_2
) # (1,2)
sym_state_probs_1122[6] = (sym_Lambda * sym_lambda_1 * sym_lambda_2**2) * (
sym_lambda_1**2
+ 4 * sym_lambda_1 * sym_mu
+ 2 * sym_lambda_1 * sym_lambda_2
+ 3 * sym_mu**2
+ sym_lambda_2**2
+ 3 * sym_lambda_2 * sym_mu
) # (2,2)
total_1122 = np.sum(sym_state_probs_1122)
sym_state_probs_1122 = [i / total_1122 for i in sym_state_probs_1122]
sym_state_recursive_ratios_1122 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1122[0, 0] = 1
sym_state_recursive_ratios_1122[0, 1] = sym.factor(
sym_state_probs_1122[1] / sym_state_probs_1122[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1122[1, 1] = sym.factor(
sym_state_probs_1122[2] / sym_state_probs_1122[1]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1122[2, 1] = sym.factor(
sym_state_probs_1122[3] / sym_state_probs_1122[2]
) # (1,1) -> (2,1)
sym_state_recursive_ratios_1122[0, 2] = sym.factor(
sym_state_probs_1122[4] / sym_state_probs_1122[1]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[4]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[5]
) # (1,2) -> (2,2)
sym_state_recursive_ratios_right_1122 = sym_state_recursive_ratios_1122.copy()
sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[2]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[3]
) # (2,1) -> (2,2)
sym_state_recursive_ratios_P0_1122 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1122[0, 0] = 1
sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(
sym_state_probs_1122[1] / sym_state_probs_1122[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(
sym_state_probs_1122[2] / sym_state_probs_1122[0]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(
sym_state_probs_1122[3] / sym_state_probs_1122[0]
) # (0,0) -> (2,1)
sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(
sym_state_probs_1122[4] / sym_state_probs_1122[0]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(
sym_state_probs_1122[5] / sym_state_probs_1122[0]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(
sym_state_probs_1122[6] / sym_state_probs_1122[0]
) # (0,0) -> (2,2)
return (
sym_state_probs_1122,
sym_state_recursive_ratios_1122,
sym_state_recursive_ratios_right_1122,
sym_state_recursive_ratios_P0_1122,
)
def get_symbolic_state_probabilities_1123():
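    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 2 and buffer capacity 3,
    by solving the balance equations of the symbolic transition matrix.

    Returns the probabilities together with the adjacent-state, rightward
    and relative-to-(0,0) ratio matrices.
    """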
num_of_servers = 1
threshold = 1
system_capacity = 2
buffer_capacity = 3
Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(
"p00, p01, p11, p21, p31, p02, p12, p22, p32"
)
pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])
dimension_1123 = Q_sym_1123.shape[0]
M_sym_1123 = sym.Matrix(
[Q_sym_1123.transpose()[:-1, :], sym.ones(1, dimension_1123)]
)
sym_diff_equations_1123 = M_sym_1123 @ pi_1123
b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])
eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])
eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])
eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])
eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])
eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])
eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])
eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])
eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])
eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])
sym_state_probs_1123 = sym.solve(
[
eq0_1123,
eq1_1123,
eq2_1123,
eq3_1123,
eq4_1123,
eq5_1123,
eq6_1123,
eq7_1123,
eq8_1123,
],
(p00, p01, p11, p21, p31, p02, p12, p22, p32),
)
sym_state_recursive_ratios_1123 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1123[0, 0] = 1
sym_state_recursive_ratios_1123[0, 1] = sym.factor(
sym_state_probs_1123[p01] / sym_state_probs_1123[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1123[1, 1] = sym.factor(
sym_state_probs_1123[p11] / sym_state_probs_1123[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1123[2, 1] = sym.factor(
sym_state_probs_1123[p21] / sym_state_probs_1123[p11]
) # (1,1) -> (2,1)
sym_state_recursive_ratios_1123[3, 1] = sym.factor(
sym_state_probs_1123[p31] / sym_state_probs_1123[p21]
) # (2,1) -> (3,1)
sym_state_recursive_ratios_1123[0, 2] = sym.factor(
sym_state_probs_1123[p02] / sym_state_probs_1123[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p12]
) # (1,2) -> (2,2)
    sym_state_recursive_ratios_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]
    )  # (2,2) -> (3,2)
sym_state_recursive_ratios_right_1123 = sym_state_recursive_ratios_1123.copy()
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p21]
) # (2,1) -> (2,2)
    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p31]
    )  # (3,1) -> (3,2)
sym_state_recursive_ratios_P0_1123 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1123[0, 0] = 1
sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(
sym_state_probs_1123[p01] / sym_state_probs_1123[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(
sym_state_probs_1123[p11] / sym_state_probs_1123[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(
sym_state_probs_1123[p21] / sym_state_probs_1123[p00]
) # (0,0) -> (2,1)
sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(
sym_state_probs_1123[p31] / sym_state_probs_1123[p00]
) # (0,0) -> (3,1)
sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(
sym_state_probs_1123[p02] / sym_state_probs_1123[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(
sym_state_probs_1123[p12] / sym_state_probs_1123[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(
sym_state_probs_1123[p22] / sym_state_probs_1123[p00]
) # (0,0) -> (2,2)
sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(
sym_state_probs_1123[p32] / sym_state_probs_1123[p00]
) # (0,0) -> (3,2)
return (
sym_state_probs_1123,
sym_state_recursive_ratios_1123,
sym_state_recursive_ratios_right_1123,
sym_state_recursive_ratios_P0_1123,
)
def get_symbolic_state_probabilities_1341():
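    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 3, system capacity 4 and buffer capacity 1.

    The unnormalised probabilities are hard-coded closed-form expressions
    in Lambda, lambda_1, lambda_2 and mu, normalised by their sum. Returns
    the probabilities together with the adjacent-state, rightward and
    relative-to-(0,0) ratio matrices.
    """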
# num_of_servers = 1
threshold = 3
system_capacity = 4
buffer_capacity = 1
all_states_1341 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1341 = [0 for _ in range(len(all_states_1341))]
sym_Lambda = sym.symbols("Lambda")
sym_lambda_1 = sym.symbols("lambda_1")
sym_lambda_2 = sym.symbols("lambda_2")
sym_mu = sym.symbols("mu")
sym_state_probs_1341[0] = (sym_lambda_2) * (sym_mu**5) + (sym_mu**6) # (0,0)
    sym_state_probs_1341[1] = (
        sym_Lambda * sym_lambda_2 * (sym_mu**4) + sym_Lambda * (sym_mu**5)
    )  # (0,1)
    sym_state_probs_1341[2] = (
        (sym_Lambda**2) * sym_lambda_2 * (sym_mu**3) + (sym_Lambda**2) * (sym_mu**4)
    )  # (0,2)
    sym_state_probs_1341[3] = (
        (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2) + (sym_Lambda**3) * (sym_mu**3)
    )  # (0,3)
    sym_state_probs_1341[4] = (
        (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu
        + (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2)
        + (sym_Lambda**3) * (sym_lambda_2**2) * sym_mu
    )  # (1,3)
sym_state_probs_1341[5] = (sym_Lambda**3) * sym_lambda_1 * (sym_mu**2) # (0,4)
sym_state_probs_1341[6] = (
(sym_Lambda**3) * (sym_lambda_1**2) * sym_lambda_2
+ (sym_Lambda**3) * sym_lambda_1 * (sym_lambda_2**2)
+ 2 * (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu
) # (1,4)
total_1341 = np.sum(sym_state_probs_1341)
sym_state_probs_1341 = [i / total_1341 for i in sym_state_probs_1341]
sym_state_recursive_ratios_1341 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1341[0, 0] = 1
sym_state_recursive_ratios_1341[0, 1] = sym.factor(
sym_state_probs_1341[1] / sym_state_probs_1341[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1341[0, 2] = sym.factor(
sym_state_probs_1341[2] / sym_state_probs_1341[1]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1341[0, 3] = sym.factor(
sym_state_probs_1341[3] / sym_state_probs_1341[2]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1341[0, 4] = sym.factor(
sym_state_probs_1341[5] / sym_state_probs_1341[3]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1341[1, 3] = sym.factor(
sym_state_probs_1341[4] / sym_state_probs_1341[3]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[5]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_right_1341 = sym_state_recursive_ratios_1341.copy()
sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[4]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_P0_1341 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1341[0, 0] = 1
sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(
sym_state_probs_1341[1] / sym_state_probs_1341[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(
sym_state_probs_1341[2] / sym_state_probs_1341[0]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(
sym_state_probs_1341[3] / sym_state_probs_1341[0]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(
sym_state_probs_1341[4] / sym_state_probs_1341[0]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(
sym_state_probs_1341[5] / sym_state_probs_1341[0]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(
sym_state_probs_1341[6] / sym_state_probs_1341[0]
) # (0,0) -> (1,4)
return (
sym_state_probs_1341,
sym_state_recursive_ratios_1341,
sym_state_recursive_ratios_right_1341,
sym_state_recursive_ratios_P0_1341,
)
def get_symbolic_state_probabilities_1131():
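    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 3 and buffer capacity 1.

    The unnormalised probabilities are hard-coded closed-form expressions
    in Lambda, lambda_1, lambda_2 and mu, normalised by the hard-coded
    denominator. Returns the probabilities together with the
    adjacent-state, rightward and relative-to-(0,0) ratio matrices.
    """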
# num_of_servers = 1
threshold = 1
system_capacity = 3
buffer_capacity = 1
all_states_1131 = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
sym_state_probs_1131 = [0 for _ in range(len(all_states_1131))]
sym_Lambda = sym.symbols("Lambda")
sym_lambda_1 = sym.symbols("lambda_1")
sym_lambda_2 = sym.symbols("lambda_2")
sym_mu = sym.symbols("mu")
# (0,0)
sym_state_probs_1131[0] = (
(sym_mu**6)
+ 2 * (sym_lambda_2 * (sym_mu**5))
+ ((sym_lambda_2**2) * (sym_mu**4))
+ (sym_lambda_1 * sym_lambda_2 * (sym_mu**4))
)
# (0,1)
sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu
# (1,1)
sym_state_probs_1131[2] = (
(sym_Lambda * (sym_lambda_1**2) * sym_lambda_2 * (sym_mu**2))
+ (sym_Lambda * sym_lambda_2 * sym_lambda_1 * (sym_mu**3))
+ 2 * (sym_Lambda * sym_lambda_1 * (sym_lambda_2**2) * (sym_mu**2))
+ 2 * (sym_Lambda * (sym_lambda_2**2) * (sym_mu**3))
+ (sym_Lambda * (sym_lambda_2**3) * (sym_mu**2))
+ (sym_Lambda * sym_lambda_2 * (sym_mu**4))
)
# (0,2)
sym_state_probs_1131[3] = (
sym_Lambda * sym_lambda_1 * sym_mu**3 * (sym_lambda_2 + sym_mu)
)
# (1,2)
sym_state_probs_1131[4] = (sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu) * (
(sym_lambda_2**2)
+ 2 * sym_lambda_2 * sym_lambda_1
+ 3 * sym_lambda_2 * sym_mu
+ (sym_lambda_1**2)
+ 2 * sym_lambda_1 * sym_mu
+ 2 * (sym_mu**2)
)
# (0,3)
sym_state_probs_1131[5] = sym_Lambda * (sym_lambda_1**2) * (sym_mu**3)
# (1,3)
sym_state_probs_1131[6] = (sym_Lambda * sym_lambda_2 * (sym_lambda_1**2)) * (
(sym_lambda_2**2)
+ 2 * sym_lambda_2 * sym_lambda_1
+ 3 * sym_lambda_2 * sym_mu
+ (sym_lambda_1**2)
+ 2 * sym_lambda_1 * sym_mu
+ 3 * (sym_mu**2)
)
denominator = (
sym_Lambda * sym_lambda_2**3 * sym_lambda_1**2
+ sym_Lambda * sym_lambda_2**3 * sym_lambda_1 * sym_mu
+ sym_Lambda * sym_lambda_2**3 * sym_mu**2
+ 2 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**3
+ 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**2 * sym_mu
+ 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1 * sym_mu**2
+ 3 * sym_Lambda * sym_lambda_2**2 * sym_mu**3
+ sym_Lambda * sym_lambda_2 * sym_lambda_1**4
+ 3 * sym_Lambda * sym_lambda_2 * sym_lambda_1**3 * sym_mu
+ 6 * sym_Lambda * sym_lambda_2 * sym_lambda_1**2 * sym_mu**2
+ 5 * sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu**3
+ 3 * sym_Lambda * sym_lambda_2 * sym_mu**4
+ sym_Lambda * sym_lambda_1**2 * sym_mu**3
+ sym_Lambda * sym_lambda_1 * sym_mu**4
+ sym_Lambda * sym_mu**5
+ sym_lambda_2**2 * sym_mu**4
+ sym_lambda_2 * sym_lambda_1 * sym_mu**4
+ 2 * sym_lambda_2 * sym_mu**5
+ sym_mu**6
)
sym_state_probs_1131 = [i / denominator for i in sym_state_probs_1131]
sym_state_recursive_ratios_1131 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1131[0, 0] = 1
sym_state_recursive_ratios_1131[0, 1] = sym.factor(
sym_state_probs_1131[1] / sym_state_probs_1131[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1131[1, 1] = sym.factor(
sym_state_probs_1131[2] / sym_state_probs_1131[1]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1131[0, 2] = sym.factor(
sym_state_probs_1131[3] / sym_state_probs_1131[1]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[3]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1131[0, 3] = sym.factor(
sym_state_probs_1131[5] / sym_state_probs_1131[3]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[5]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_right_1131 = sym_state_recursive_ratios_1131.copy()
sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[2]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[4]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_P0_1131 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1131[0, 0] = 1
sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(
sym_state_probs_1131[1] / sym_state_probs_1131[0]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(
sym_state_probs_1131[2] / sym_state_probs_1131[0]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(
sym_state_probs_1131[3] / sym_state_probs_1131[0]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(
sym_state_probs_1131[4] / sym_state_probs_1131[0]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(
sym_state_probs_1131[5] / sym_state_probs_1131[0]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(
sym_state_probs_1131[6] / sym_state_probs_1131[0]
) # (0,0) -> (1,3)
return (
sym_state_probs_1131,
sym_state_recursive_ratios_1131,
sym_state_recursive_ratios_right_1131,
sym_state_recursive_ratios_P0_1131,
)
def get_symbolic_state_probabilities_1132():
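    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 3 and buffer capacity 2,
    by solving the balance equations of the symbolic transition matrix.

    Returns the probabilities together with the adjacent-state, rightward
    and relative-to-(0,0) ratio matrices.
    """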
num_of_servers = 1
threshold = 1
system_capacity = 3
buffer_capacity = 2
Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(
"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23"
)
pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])
dimension_1132 = Q_sym_1132.shape[0]
M_sym_1132 = sym.Matrix(
[Q_sym_1132.transpose()[:-1, :], sym.ones(1, dimension_1132)]
)
sym_diff_equations_1132 = M_sym_1132 @ pi_1132
b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])
eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])
eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])
eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])
eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])
eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])
eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])
eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])
eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])
eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])
sym_state_probs_1132 = sym.solve(
[
eq0_1132,
eq1_1132,
eq2_1132,
eq3_1132,
eq4_1132,
eq5_1132,
eq6_1132,
eq7_1132,
eq8_1132,
eq9_1132,
],
(p00, p01, p11, p21, p02, p12, p22, p03, p13, p23),
)
sym_state_recursive_ratios_1132 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1132[0, 0] = 1
sym_state_recursive_ratios_1132[0, 1] = sym.factor(
sym_state_probs_1132[p01] / sym_state_probs_1132[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1132[1, 1] = sym.factor(
sym_state_probs_1132[p11] / sym_state_probs_1132[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1132[2, 1] = sym.factor(
sym_state_probs_1132[p21] / sym_state_probs_1132[p11]
) # (1,1) -> (2,1)
sym_state_recursive_ratios_1132[0, 2] = sym.factor(
sym_state_probs_1132[p02] / sym_state_probs_1132[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p12]
) # (1,2) -> (2,2)
sym_state_recursive_ratios_1132[0, 3] = sym.factor(
sym_state_probs_1132[p03] / sym_state_probs_1132[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p13]
) # (1,3) -> (2,3)
sym_state_recursive_ratios_right_1132 = sym_state_recursive_ratios_1132.copy()
sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p21]
) # (2,1) -> (2,2)
sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p22]
) # (2,2) -> (2,3)
sym_state_recursive_ratios_P0_1132 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1132[0, 0] = 1
sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(
sym_state_probs_1132[p01] / sym_state_probs_1132[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(
sym_state_probs_1132[p11] / sym_state_probs_1132[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(
sym_state_probs_1132[p21] / sym_state_probs_1132[p00]
) # (0,0) -> (2,1)
sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(
sym_state_probs_1132[p02] / sym_state_probs_1132[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(
sym_state_probs_1132[p12] / sym_state_probs_1132[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(
sym_state_probs_1132[p22] / sym_state_probs_1132[p00]
) # (0,0) -> (2,2)
sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(
sym_state_probs_1132[p03] / sym_state_probs_1132[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(
sym_state_probs_1132[p13] / sym_state_probs_1132[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(
sym_state_probs_1132[p23] / sym_state_probs_1132[p00]
) # (0,0) -> (2,3)
return (
sym_state_probs_1132,
sym_state_recursive_ratios_1132,
sym_state_recursive_ratios_right_1132,
sym_state_recursive_ratios_P0_1132,
)
def get_symbolic_state_probabilities_1141():
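    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 4 and buffer capacity 1,
    by solving the balance equations of the symbolic transition matrix.

    Returns the probabilities together with the adjacent-state, rightward
    and relative-to-(0,0) ratio matrices.
    """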
num_of_servers = 1
threshold = 1
system_capacity = 4
buffer_capacity = 1
Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14"
)
pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])
dimension_1141 = Q_sym_1141.shape[0]
M_sym_1141 = sym.Matrix(
[Q_sym_1141.transpose()[:-1, :], sym.ones(1, dimension_1141)]
)
sym_diff_equations_1141 = M_sym_1141 @ pi_1141
b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])
eq0_1141 = sym.Eq(sym_diff_equations_1141[0], b_sym_1141[0])
eq1_1141 = sym.Eq(sym_diff_equations_1141[1], b_sym_1141[1])
eq2_1141 = sym.Eq(sym_diff_equations_1141[2], b_sym_1141[2])
eq3_1141 = sym.Eq(sym_diff_equations_1141[3], b_sym_1141[3])
eq4_1141 = sym.Eq(sym_diff_equations_1141[4], b_sym_1141[4])
eq5_1141 = sym.Eq(sym_diff_equations_1141[5], b_sym_1141[5])
eq6_1141 = sym.Eq(sym_diff_equations_1141[6], b_sym_1141[6])
eq7_1141 = sym.Eq(sym_diff_equations_1141[7], b_sym_1141[7])
eq8_1141 = sym.Eq(sym_diff_equations_1141[8], b_sym_1141[8])
sym_state_probs_1141 = sym.solve(
[
eq0_1141,
eq1_1141,
eq2_1141,
eq3_1141,
eq4_1141,
eq5_1141,
eq6_1141,
eq7_1141,
eq8_1141,
],
(p00, p01, p11, p02, p12, p03, p13, p04, p14),
)
sym_state_recursive_ratios_1141 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1141[0, 0] = 1
sym_state_recursive_ratios_1141[0, 1] = sym.factor(
sym_state_probs_1141[p01] / sym_state_probs_1141[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1141[1, 1] = sym.factor(
sym_state_probs_1141[p11] / sym_state_probs_1141[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1141[0, 2] = sym.factor(
sym_state_probs_1141[p02] / sym_state_probs_1141[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1141[1, 2] = sym.factor(
sym_state_probs_1141[p12] / sym_state_probs_1141[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1141[0, 3] = sym.factor(
sym_state_probs_1141[p03] / sym_state_probs_1141[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1141[1, 3] = sym.factor(
sym_state_probs_1141[p13] / sym_state_probs_1141[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1141[0, 4] = sym.factor(
sym_state_probs_1141[p04] / sym_state_probs_1141[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1141[1, 4] = sym.factor(
sym_state_probs_1141[p14] / sym_state_probs_1141[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_right_1141 = sym_state_recursive_ratios_1141.copy()
sym_state_recursive_ratios_right_1141[1, 2] = sym.factor(
sym_state_probs_1141[p12] / sym_state_probs_1141[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1141[1, 3] = sym.factor(
sym_state_probs_1141[p13] / sym_state_probs_1141[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1141[1, 4] = sym.factor(
sym_state_probs_1141[p14] / sym_state_probs_1141[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_P0_1141 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1141[0, 0] = 1
sym_state_recursive_ratios_P0_1141[0, 1] = sym.factor(
sym_state_probs_1141[p01] / sym_state_probs_1141[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1141[1, 1] = sym.factor(
sym_state_probs_1141[p11] / sym_state_probs_1141[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1141[0, 2] = sym.factor(
sym_state_probs_1141[p02] / sym_state_probs_1141[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1141[1, 2] = sym.factor(
sym_state_probs_1141[p12] / sym_state_probs_1141[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1141[0, 3] = sym.factor(
sym_state_probs_1141[p03] / sym_state_probs_1141[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1141[1, 3] = sym.factor(
sym_state_probs_1141[p13] / sym_state_probs_1141[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1141[0, 4] = sym.factor(
sym_state_probs_1141[p04] / sym_state_probs_1141[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1141[1, 4] = sym.factor(
sym_state_probs_1141[p14] / sym_state_probs_1141[p00]
) # (0,0) -> (1,4)
return (
sym_state_probs_1141,
sym_state_recursive_ratios_1141,
sym_state_recursive_ratios_right_1141,
sym_state_recursive_ratios_P0_1141,
)
def get_symbolic_state_probabilities_1142():
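    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 4 and buffer capacity 2,
    by solving the balance equations of the symbolic transition matrix.

    Returns the probabilities together with the adjacent-state, rightward
    and relative-to-(0,0) ratio matrices.
    """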
num_of_servers = 1
threshold = 1
system_capacity = 4
buffer_capacity = 2
Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(
num_of_servers=num_of_servers,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = sym.symbols(
"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24"
)
pi_1142 = sym.Matrix(
[p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24]
)
dimension_1142 = Q_sym_1142.shape[0]
M_sym_1142 = sym.Matrix(
[Q_sym_1142.transpose()[:-1, :], sym.ones(1, dimension_1142)]
)
sym_diff_equations_1142 = M_sym_1142 @ pi_1142
b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])
eq0_1142 = sym.Eq(sym_diff_equations_1142[0], b_sym_1142[0])
eq1_1142 = sym.Eq(sym_diff_equations_1142[1], b_sym_1142[1])
eq2_1142 = sym.Eq(sym_diff_equations_1142[2], b_sym_1142[2])
eq3_1142 = sym.Eq(sym_diff_equations_1142[3], b_sym_1142[3])
eq4_1142 = sym.Eq(sym_diff_equations_1142[4], b_sym_1142[4])
eq5_1142 = sym.Eq(sym_diff_equations_1142[5], b_sym_1142[5])
eq6_1142 = sym.Eq(sym_diff_equations_1142[6], b_sym_1142[6])
eq7_1142 = sym.Eq(sym_diff_equations_1142[7], b_sym_1142[7])
eq8_1142 = sym.Eq(sym_diff_equations_1142[8], b_sym_1142[8])
eq9_1142 = sym.Eq(sym_diff_equations_1142[9], b_sym_1142[9])
eq10_1142 = sym.Eq(sym_diff_equations_1142[10], b_sym_1142[10])
eq11_1142 = sym.Eq(sym_diff_equations_1142[11], b_sym_1142[11])
eq12_1142 = sym.Eq(sym_diff_equations_1142[12], b_sym_1142[12])
sym_state_probs_1142 = sym.solve(
[
eq0_1142,
eq1_1142,
eq2_1142,
eq3_1142,
eq4_1142,
eq5_1142,
eq6_1142,
eq7_1142,
eq8_1142,
eq9_1142,
eq10_1142,
eq11_1142,
eq12_1142,
],
(p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24),
)
sym_state_recursive_ratios_1142 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1142[0, 0] = 1
sym_state_recursive_ratios_1142[0, 1] = sym.factor(
sym_state_probs_1142[p01] / sym_state_probs_1142[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1142[1, 1] = sym.factor(
sym_state_probs_1142[p11] / sym_state_probs_1142[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1142[2, 1] = sym.factor(
sym_state_probs_1142[p21] / sym_state_probs_1142[p11]
) # (1,1) -> (2,1)
sym_state_recursive_ratios_1142[0, 2] = sym.factor(
sym_state_probs_1142[p02] / sym_state_probs_1142[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1142[1, 2] = sym.factor(
sym_state_probs_1142[p12] / sym_state_probs_1142[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1142[2, 2] = sym.factor(
sym_state_probs_1142[p22] / sym_state_probs_1142[p12]
) # (1,2) -> (2,2)
sym_state_recursive_ratios_1142[0, 3] = sym.factor(
sym_state_probs_1142[p03] / sym_state_probs_1142[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1142[1, 3] = sym.factor(
sym_state_probs_1142[p13] / sym_state_probs_1142[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1142[2, 3] = sym.factor(
sym_state_probs_1142[p23] / sym_state_probs_1142[p13]
) # (1,3) -> (2,3)
sym_state_recursive_ratios_1142[0, 4] = sym.factor(
sym_state_probs_1142[p04] / sym_state_probs_1142[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1142[1, 4] = sym.factor(
sym_state_probs_1142[p14] / sym_state_probs_1142[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1142[2, 4] = sym.factor(
sym_state_probs_1142[p24] / sym_state_probs_1142[p14]
) # (1,4) -> (2,4)
sym_state_recursive_ratios_right_1142 = sym_state_recursive_ratios_1142.copy()
sym_state_recursive_ratios_right_1142[1, 2] = sym.factor(
sym_state_probs_1142[p12] / sym_state_probs_1142[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1142[1, 3] = sym.factor(
sym_state_probs_1142[p13] / sym_state_probs_1142[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1142[1, 4] = sym.factor(
sym_state_probs_1142[p14] / sym_state_probs_1142[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1142[2, 2] = sym.factor(
sym_state_probs_1142[p22] / sym_state_probs_1142[p21]
) # (2,1) -> (2,2)
sym_state_recursive_ratios_right_1142[2, 3] = sym.factor(
sym_state_probs_1142[p23] / sym_state_probs_1142[p22]
) # (2,2) -> (2,3)
sym_state_recursive_ratios_right_1142[2, 4] = sym.factor(
sym_state_probs_1142[p24] / sym_state_probs_1142[p23]
) # (2,3) -> (2,4)
sym_state_recursive_ratios_P0_1142 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1142[0, 0] = 1
sym_state_recursive_ratios_P0_1142[0, 1] = sym.factor(
sym_state_probs_1142[p01] / sym_state_probs_1142[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1142[1, 1] = sym.factor(
sym_state_probs_1142[p11] / sym_state_probs_1142[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1142[2, 1] = sym.factor(
sym_state_probs_1142[p21] / sym_state_probs_1142[p00]
) # (0,0) -> (2,1)
sym_state_recursive_ratios_P0_1142[0, 2] = sym.factor(
sym_state_probs_1142[p02] / sym_state_probs_1142[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1142[1, 2] = sym.factor(
sym_state_probs_1142[p12] / sym_state_probs_1142[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1142[2, 2] = sym.factor(
sym_state_probs_1142[p22] / sym_state_probs_1142[p00]
) # (0,0) -> (2,2)
sym_state_recursive_ratios_P0_1142[0, 3] = sym.factor(
sym_state_probs_1142[p03] / sym_state_probs_1142[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1142[1, 3] = sym.factor(
sym_state_probs_1142[p13] / sym_state_probs_1142[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1142[2, 3] = sym.factor(
sym_state_probs_1142[p23] / sym_state_probs_1142[p00]
) # (0,0) -> (2,3)
sym_state_recursive_ratios_P0_1142[0, 4] = sym.factor(
sym_state_probs_1142[p04] / sym_state_probs_1142[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1142[1, 4] = sym.factor(
sym_state_probs_1142[p14] / sym_state_probs_1142[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1142[2, 4] = sym.factor(
sym_state_probs_1142[p24] / sym_state_probs_1142[p00]
) # (0,0) -> (2,4)
return (
sym_state_probs_1142,
sym_state_recursive_ratios_1142,
sym_state_recursive_ratios_right_1142,
sym_state_recursive_ratios_P0_1142,
)
def get_symbolic_state_probabilities_1151():
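    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 5 and buffer capacity 1,
    by solving the balance equations of the symbolic transition matrix.

    Returns the probabilities together with the adjacent-state, rightward
    and relative-to-(0,0) ratio matrices.
    """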
num_of_servers = 1
threshold = 1
system_capacity = 5
buffer_capacity = 1
Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15"
)
pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15])
dimension_1151 = Q_sym_1151.shape[0]
M_sym_1151 = sym.Matrix(
[Q_sym_1151.transpose()[:-1, :], sym.ones(1, dimension_1151)]
)
sym_diff_equations_1151 = M_sym_1151 @ pi_1151
b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])
eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])
eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])
eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])
eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])
eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])
eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])
eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])
eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])
eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])
eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])
sym_state_probs_1151 = sym.solve(
[
eq0_1151,
eq1_1151,
eq2_1151,
eq3_1151,
eq4_1151,
eq5_1151,
eq6_1151,
eq7_1151,
eq8_1151,
eq9_1151,
eq10_1151,
],
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15),
)
sym_state_recursive_ratios_1151 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1151[0, 0] = 1
sym_state_recursive_ratios_1151[0, 1] = sym.factor(
sym_state_probs_1151[p01] / sym_state_probs_1151[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1151[1, 1] = sym.factor(
sym_state_probs_1151[p11] / sym_state_probs_1151[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1151[0, 2] = sym.factor(
sym_state_probs_1151[p02] / sym_state_probs_1151[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1151[0, 3] = sym.factor(
sym_state_probs_1151[p03] / sym_state_probs_1151[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1151[0, 4] = sym.factor(
sym_state_probs_1151[p04] / sym_state_probs_1151[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1151[0, 5] = sym.factor(
sym_state_probs_1151[p05] / sym_state_probs_1151[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_right_1151 = sym_state_recursive_ratios_1151.copy()
sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_P0_1151 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1151[0, 0] = 1
sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(
sym_state_probs_1151[p01] / sym_state_probs_1151[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(
sym_state_probs_1151[p11] / sym_state_probs_1151[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(
sym_state_probs_1151[p02] / sym_state_probs_1151[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(
sym_state_probs_1151[p12] / sym_state_probs_1151[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(
sym_state_probs_1151[p03] / sym_state_probs_1151[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(
sym_state_probs_1151[p13] / sym_state_probs_1151[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(
sym_state_probs_1151[p04] / sym_state_probs_1151[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(
sym_state_probs_1151[p14] / sym_state_probs_1151[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(
sym_state_probs_1151[p05] / sym_state_probs_1151[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(
sym_state_probs_1151[p15] / sym_state_probs_1151[p00]
) # (0,0) -> (1,5)
return (
sym_state_probs_1151,
sym_state_recursive_ratios_1151,
sym_state_recursive_ratios_right_1151,
sym_state_recursive_ratios_P0_1151,
)
def get_symbolic_state_probabilities_1161():
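    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 6 and buffer capacity 1,
    by solving the balance equations of the symbolic transition matrix.

    Returns the probabilities together with the adjacent-state, rightward
    and relative-to-(0,0) ratio matrices.
    """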
num_of_servers = 1
threshold = 1
system_capacity = 6
buffer_capacity = 1
Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16"
)
pi_1161 = sym.Matrix(
[p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16]
)
dimension_1161 = Q_sym_1161.shape[0]
M_sym_1161 = sym.Matrix(
[Q_sym_1161.transpose()[:-1, :], sym.ones(1, dimension_1161)]
)
sym_diff_equations_1161 = M_sym_1161 @ pi_1161
b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])
eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])
eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])
eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])
eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])
eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])
eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])
eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])
eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])
eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])
eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])
eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])
eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])
sym_state_probs_1161 = sym.solve(
[
eq0_1161,
eq1_1161,
eq2_1161,
eq3_1161,
eq4_1161,
eq5_1161,
eq6_1161,
eq7_1161,
eq8_1161,
eq9_1161,
eq10_1161,
eq11_1161,
eq12_1161,
],
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16),
)
sym_state_recursive_ratios_1161 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1161[0, 0] = 1
sym_state_recursive_ratios_1161[0, 1] = sym.factor(
sym_state_probs_1161[p01] / sym_state_probs_1161[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1161[1, 1] = sym.factor(
sym_state_probs_1161[p11] / sym_state_probs_1161[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1161[0, 2] = sym.factor(
sym_state_probs_1161[p02] / sym_state_probs_1161[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1161[0, 3] = sym.factor(
sym_state_probs_1161[p03] / sym_state_probs_1161[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1161[0, 4] = sym.factor(
sym_state_probs_1161[p04] / sym_state_probs_1161[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1161[0, 5] = sym.factor(
sym_state_probs_1161[p05] / sym_state_probs_1161[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1161[0, 6] = sym.factor(
sym_state_probs_1161[p06] / sym_state_probs_1161[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_right_1161 = sym_state_recursive_ratios_1161.copy()
sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_P0_1161 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1161[0, 0] = 1
sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(
sym_state_probs_1161[p01] / sym_state_probs_1161[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(
sym_state_probs_1161[p11] / sym_state_probs_1161[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(
sym_state_probs_1161[p02] / sym_state_probs_1161[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(
sym_state_probs_1161[p12] / sym_state_probs_1161[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(
sym_state_probs_1161[p03] / sym_state_probs_1161[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(
sym_state_probs_1161[p13] / sym_state_probs_1161[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(
sym_state_probs_1161[p04] / sym_state_probs_1161[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(
sym_state_probs_1161[p14] / sym_state_probs_1161[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(
sym_state_probs_1161[p05] / sym_state_probs_1161[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(
sym_state_probs_1161[p15] / sym_state_probs_1161[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(
sym_state_probs_1161[p06] / sym_state_probs_1161[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(
sym_state_probs_1161[p16] / sym_state_probs_1161[p00]
) # (0,0) -> (1,6)
return (
sym_state_probs_1161,
sym_state_recursive_ratios_1161,
sym_state_recursive_ratios_right_1161,
sym_state_recursive_ratios_P0_1161,
)
def get_symbolic_state_probabilities_1171():
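    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 7 and buffer capacity 1,
    by solving the balance equations of the symbolic transition matrix.

    Returns the probabilities together with the adjacent-state, rightward
    and relative-to-(0,0) ratio matrices.
    """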
num_of_servers = 1
threshold = 1
system_capacity = 7
buffer_capacity = 1
Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
) = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17"
)
pi_1171 = sym.Matrix(
[p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17]
)
dimension_1171 = Q_sym_1171.shape[0]
M_sym_1171 = sym.Matrix(
[Q_sym_1171.transpose()[:-1, :], sym.ones(1, dimension_1171)]
)
sym_diff_equations_1171 = M_sym_1171 @ pi_1171
b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])
eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])
eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])
eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])
eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])
eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])
eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])
eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])
eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])
eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])
eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])
eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])
eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])
eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])
eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])
sym_state_probs_1171 = sym.solve(
[
eq0_1171,
eq1_1171,
eq2_1171,
eq3_1171,
eq4_1171,
eq5_1171,
eq6_1171,
eq7_1171,
eq8_1171,
eq9_1171,
eq10_1171,
eq11_1171,
eq12_1171,
eq13_1171,
eq14_1171,
],
(p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17),
)
sym_state_recursive_ratios_1171 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1171[0, 0] = 1
sym_state_recursive_ratios_1171[0, 1] = sym.factor(
sym_state_probs_1171[p01] / sym_state_probs_1171[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1171[1, 1] = sym.factor(
sym_state_probs_1171[p11] / sym_state_probs_1171[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1171[0, 2] = sym.factor(
sym_state_probs_1171[p02] / sym_state_probs_1171[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1171[0, 3] = sym.factor(
sym_state_probs_1171[p03] / sym_state_probs_1171[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1171[0, 4] = sym.factor(
sym_state_probs_1171[p04] / sym_state_probs_1171[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1171[0, 5] = sym.factor(
sym_state_probs_1171[p05] / sym_state_probs_1171[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1171[0, 6] = sym.factor(
sym_state_probs_1171[p06] / sym_state_probs_1171[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_1171[0, 7] = sym.factor(
sym_state_probs_1171[p07] / sym_state_probs_1171[p06]
) # (0,6) -> (0,7)
sym_state_recursive_ratios_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p07]
) # (0,7) -> (1,7)
sym_state_recursive_ratios_right_1171 = sym_state_recursive_ratios_1171.copy()
sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p16]
) # (1,6) -> (1,7)
sym_state_recursive_ratios_P0_1171 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1171[0, 0] = 1
sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(
sym_state_probs_1171[p01] / sym_state_probs_1171[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(
sym_state_probs_1171[p11] / sym_state_probs_1171[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(
sym_state_probs_1171[p02] / sym_state_probs_1171[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(
sym_state_probs_1171[p12] / sym_state_probs_1171[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(
sym_state_probs_1171[p03] / sym_state_probs_1171[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(
sym_state_probs_1171[p13] / sym_state_probs_1171[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(
sym_state_probs_1171[p04] / sym_state_probs_1171[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(
sym_state_probs_1171[p14] / sym_state_probs_1171[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(
sym_state_probs_1171[p05] / sym_state_probs_1171[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(
sym_state_probs_1171[p15] / sym_state_probs_1171[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(
sym_state_probs_1171[p06] / sym_state_probs_1171[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(
sym_state_probs_1171[p16] / sym_state_probs_1171[p00]
) # (0,0) -> (1,6)
sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(
sym_state_probs_1171[p07] / sym_state_probs_1171[p00]
) # (0,0) -> (0,7)
sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(
sym_state_probs_1171[p17] / sym_state_probs_1171[p00]
) # (0,0) -> (1,7)
return (
sym_state_probs_1171,
sym_state_recursive_ratios_1171,
sym_state_recursive_ratios_right_1171,
sym_state_recursive_ratios_P0_1171,
)
def get_symbolic_state_probabilities_1181():
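    """Obtain the symbolic steady-state probabilities of the Markov chain
    with 1 server, threshold 1, system capacity 8 and buffer capacity 1,
    by solving the balance equations of the symbolic transition matrix.

    Returns the probabilities together with the adjacent-state, rightward
    and relative-to-(0,0) ratio matrices.
    """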
num_of_servers = 1
threshold = 1
system_capacity = 8
buffer_capacity = 1
Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
) = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18"
)
pi_1181 = sym.Matrix(
[
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
]
)
dimension_1181 = Q_sym_1181.shape[0]
M_sym_1181 = sym.Matrix(
[Q_sym_1181.transpose()[:-1, :], sym.ones(1, dimension_1181)]
)
sym_diff_equations_1181 = M_sym_1181 @ pi_1181
b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])
eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])
eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])
eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])
eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])
eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])
eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])
eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])
eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])
eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])
eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])
eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])
eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])
eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])
eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])
eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])
eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])
eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])
sym_state_probs_1181 = sym.solve(
[
eq0_1181,
eq1_1181,
eq2_1181,
eq3_1181,
eq4_1181,
eq5_1181,
eq6_1181,
eq7_1181,
eq8_1181,
eq9_1181,
eq10_1181,
eq11_1181,
eq12_1181,
eq13_1181,
eq14_1181,
eq15_1181,
eq16_1181,
],
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
),
)
sym_state_recursive_ratios_1181 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1181[0, 0] = 1
sym_state_recursive_ratios_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p06]
) # (0,6) -> (0,7)
sym_state_recursive_ratios_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p07]
) # (0,7) -> (1,7)
sym_state_recursive_ratios_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p07]
) # (0,7) -> (0,8)
sym_state_recursive_ratios_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p08]
) # (0,8) -> (1,8)
sym_state_recursive_ratios_right_1181 = sym_state_recursive_ratios_1181.copy()
sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p16]
) # (1,6) -> (1,7)
sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p17]
) # (1,7) -> (1,8)
sym_state_recursive_ratios_P0_1181 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1181[0, 0] = 1
sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p00]
) # (0,0) -> (1,6)
sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p00]
) # (0,0) -> (0,7)
sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p00]
) # (0,0) -> (1,7)
sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p00]
) # (0,0) -> (0,8)
sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p00]
) # (0,0) -> (1,8)
return (
sym_state_probs_1181,
sym_state_recursive_ratios_1181,
sym_state_recursive_ratios_right_1181,
sym_state_recursive_ratios_P0_1181,
)
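# Every solver in this module repeats the same three ratio constructions by
# hand. The sketch below shows the shared pattern for the threshold=1,
# buffer_capacity=1 family; the helper name and its symbol lookup are
# illustrative assumptions, not part of the original interface.
def build_ratio_matrices_t1_b1(state_probs, system_capacity):
    """Rebuild the (stepwise, rightward, P0-relative) ratio matrices.

    ``state_probs`` is a dict keyed by sympy symbols named ``p{i}{j}``,
    exactly as returned by ``sym.solve`` in the functions above.
    """

    def p(i, j):
        return state_probs[sym.Symbol(f"p{i}{j}")]

    step = sym.zeros(2, system_capacity + 1)
    P0 = sym.zeros(2, system_capacity + 1)
    step[0, 0] = 1
    P0[0, 0] = 1
    for v in range(1, system_capacity + 1):
        step[0, v] = sym.factor(p(0, v) / p(0, v - 1))  # (0,v-1) -> (0,v)
        step[1, v] = sym.factor(p(1, v) / p(0, v))  # (0,v) -> (1,v)
        P0[0, v] = sym.factor(p(0, v) / p(0, 0))  # (0,0) -> (0,v)
        P0[1, v] = sym.factor(p(1, v) / p(0, 0))  # (0,0) -> (1,v)
    right = step.copy()
    for v in range(2, system_capacity + 1):
        right[1, v] = sym.factor(p(1, v) / p(1, v - 1))  # (1,v-1) -> (1,v)
    return step, right, P0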
def get_symbolic_state_probabilities_1191():
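    """Solve the threshold-1 model with system capacity 9 and buffer capacity 1.

    Returns the symbolic steady-state probabilities together with the
    stepwise, rightward and P0-relative recursive-ratio matrices.
    """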
num_of_servers = 1
threshold = 1
system_capacity = 9
buffer_capacity = 1
Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
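    # Symbolic generator matrix of the chain with states (0,0), (0,v) and
    # (1,v) for v = 1, ..., 9 (19 states in total).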
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
p09,
p19,
) = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19"
)
pi_1191 = sym.Matrix(
[
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
p09,
p19,
]
)
dimension_1191 = Q_sym_1191.shape[0]
M_sym_1191 = sym.Matrix(
[Q_sym_1191.transpose()[:-1, :], sym.ones(1, dimension_1191)]
)
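    # Replace the last (linearly dependent) balance equation of pi @ Q = 0
    # with the normalisation condition sum(pi) = 1.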
sym_diff_equations_1191 = M_sym_1191 @ pi_1191
b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])
eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])
eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])
eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])
eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])
eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])
eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])
eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])
eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])
eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])
eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])
eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])
eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])
eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])
eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])
eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])
eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])
eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])
eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])
eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])
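    # Nineteen equations in total: eighteen balance equations plus the
    # normalisation row appended above.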
sym_state_probs_1191 = sym.solve(
[
eq0_1191,
eq1_1191,
eq2_1191,
eq3_1191,
eq4_1191,
eq5_1191,
eq6_1191,
eq7_1191,
eq8_1191,
eq9_1191,
eq10_1191,
eq11_1191,
eq12_1191,
eq13_1191,
eq14_1191,
eq15_1191,
eq16_1191,
eq17_1191,
eq18_1191,
],
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
p09,
p19,
),
)
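    # sym.solve returns a dict keyed by the probability symbols, which the
    # ratio constructions below index directly.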
sym_state_recursive_ratios_1191 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1191[0, 0] = 1
sym_state_recursive_ratios_1191[0, 1] = sym.factor(
sym_state_probs_1191[p01] / sym_state_probs_1191[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1191[1, 1] = sym.factor(
sym_state_probs_1191[p11] / sym_state_probs_1191[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1191[0, 2] = sym.factor(
sym_state_probs_1191[p02] / sym_state_probs_1191[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1191[0, 3] = sym.factor(
sym_state_probs_1191[p03] / sym_state_probs_1191[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1191[0, 4] = sym.factor(
sym_state_probs_1191[p04] / sym_state_probs_1191[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1191[0, 5] = sym.factor(
sym_state_probs_1191[p05] / sym_state_probs_1191[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1191[0, 6] = sym.factor(
sym_state_probs_1191[p06] / sym_state_probs_1191[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_1191[0, 7] = sym.factor(
sym_state_probs_1191[p07] / sym_state_probs_1191[p06]
) # (0,6) -> (0,7)
sym_state_recursive_ratios_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p07]
) # (0,7) -> (1,7)
sym_state_recursive_ratios_1191[0, 8] = sym.factor(
sym_state_probs_1191[p08] / sym_state_probs_1191[p07]
) # (0,7) -> (0,8)
sym_state_recursive_ratios_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p08]
) # (0,8) -> (1,8)
sym_state_recursive_ratios_1191[0, 9] = sym.factor(
sym_state_probs_1191[p09] / sym_state_probs_1191[p08]
) # (0,8) -> (0,9)
sym_state_recursive_ratios_1191[1, 9] = sym.factor(
sym_state_probs_1191[p19] / sym_state_probs_1191[p09]
) # (0,9) -> (1,9)
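    # Rightward variant: the row-1 entries become (1, v) -> (1, v + 1) ratios.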
sym_state_recursive_ratios_right_1191 = sym_state_recursive_ratios_1191.copy()
sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p16]
) # (1,6) -> (1,7)
sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p17]
) # (1,7) -> (1,8)
    sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(
        sym_state_probs_1191[p19] / sym_state_probs_1191[p18]
    )  # (1,8) -> (1,9)
sym_state_recursive_ratios_P0_1191 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1191[0, 0] = 1
sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(
sym_state_probs_1191[p01] / sym_state_probs_1191[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(
sym_state_probs_1191[p11] / sym_state_probs_1191[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(
sym_state_probs_1191[p02] / sym_state_probs_1191[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(
sym_state_probs_1191[p12] / sym_state_probs_1191[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(
sym_state_probs_1191[p03] / sym_state_probs_1191[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(
sym_state_probs_1191[p13] / sym_state_probs_1191[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(
sym_state_probs_1191[p04] / sym_state_probs_1191[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(
sym_state_probs_1191[p14] / sym_state_probs_1191[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(
sym_state_probs_1191[p05] / sym_state_probs_1191[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(
sym_state_probs_1191[p15] / sym_state_probs_1191[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(
sym_state_probs_1191[p06] / sym_state_probs_1191[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(
sym_state_probs_1191[p16] / sym_state_probs_1191[p00]
) # (0,0) -> (1,6)
sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(
sym_state_probs_1191[p07] / sym_state_probs_1191[p00]
) # (0,0) -> (0,7)
sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(
sym_state_probs_1191[p17] / sym_state_probs_1191[p00]
) # (0,0) -> (1,7)
sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(
sym_state_probs_1191[p08] / sym_state_probs_1191[p00]
) # (0,0) -> (0,8)
sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(
sym_state_probs_1191[p18] / sym_state_probs_1191[p00]
) # (0,0) -> (1,8)
sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(
sym_state_probs_1191[p09] / sym_state_probs_1191[p00]
) # (0,0) -> (0,9)
sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(
sym_state_probs_1191[p19] / sym_state_probs_1191[p00]
) # (0,0) -> (1,9)
return (
sym_state_probs_1191,
sym_state_recursive_ratios_1191,
sym_state_recursive_ratios_right_1191,
sym_state_recursive_ratios_P0_1191,
)
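# Minimal sanity check for any solver above (illustrative only): the
# probabilities are normalised by construction, so their symbolic sum
# simplifies to one for every choice of rates, e.g.
#
#     probs, *_ = get_symbolic_state_probabilities_1191()
#     assert sym.simplify(sum(probs.values())) == 1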
sym.factor(sym_state_probs_1171\n [p12] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171\n [p03] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171\n [p13] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171\n [p04] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171\n [p14] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171\n [p05] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171\n [p15] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171\n [p06] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171\n [p16] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171\n [p07] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171\n [p17] / sym_state_probs_1171[p07])\n sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171\n .copy())\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11])\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12])\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13])\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14])\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15])\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16])\n sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n 
sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n 
sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12])\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n 
sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / 
sym_state_probs_1191[p03])\n    sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191\n        [p04] / sym_state_probs_1191[p03])\n    sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n        [p14] / sym_state_probs_1191[p04])\n    sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n        [p05] / sym_state_probs_1191[p04])\n    sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n        [p15] / sym_state_probs_1191[p05])\n    sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n        [p06] / sym_state_probs_1191[p05])\n    sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n        [p16] / sym_state_probs_1191[p06])\n    sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n        [p07] / sym_state_probs_1191[p06])\n    sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n        [p17] / sym_state_probs_1191[p07])\n    sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n        [p08] / sym_state_probs_1191[p07])\n    sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n        [p18] / sym_state_probs_1191[p08])\n    sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191\n        [p09] / sym_state_probs_1191[p08])\n    sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n        [p19] / sym_state_probs_1191[p09])\n    sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n        .copy())\n    sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n        sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n    sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n        sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n    sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n        sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n    sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n        sym_state_probs_1191[p15] / sym_state_probs_1191[p14])\n    sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n        sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n    sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n        sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n    sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n        sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n    sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(\n        sym_state_probs_1191[p19] / sym_state_probs_1191[p18])\n    sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n        system_capacity + 1)\n    sym_state_recursive_ratios_P0_1191[0, 0] = 1\n    sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n        sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n        sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n        sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n        sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n        sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n        sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n        sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n        sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n    sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n        sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n    
sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191)\n",
"step-3": "<mask token>\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity\n ):\n Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return 'Capacity of 6 exceeded'\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n all_states_1222 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])\n sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222\n [1] / sym_state_probs_1222[0])\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222\n [2] / sym_state_probs_1222[1])\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222\n [3] / sym_state_probs_1222[2])\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222\n [4] / sym_state_probs_1222[3])\n return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 1\n all_states_1121 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_pi_1121 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n sym_state_probs_1121 = [(0) for _ in range(len(all_states_1121))]\n sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])\n sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])\n sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])\n sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])\n sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])\n sym_state_recursive_ratios_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1121[0, 0] = 1\n sym_state_recursive_ratios_1121[0, 1] = sym.factor(sym_state_probs_1121\n [1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_1121[1, 1] = sym.factor(sym_state_probs_1121\n [2] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[0, 2] = sym.factor(sym_state_probs_1121\n [3] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(sym_state_probs_1121\n [4] / sym_state_probs_1121[3])\n sym_state_recursive_ratios_right_1121 = (sym_state_recursive_ratios_1121\n .copy())\n sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2])\n 
sym_state_recursive_ratios_P0_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0])\n return (sym_state_probs_1121, sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121)\n\n\ndef get_symbolic_state_probabilities_1122():\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n all_states_1122 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +\n sym_lambda_2 ** 2 * sym_mu ** 4)\n sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *\n sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)\n sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (\n sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *\n sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *\n sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu + \n sym_lambda_1 ** 2)\n sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1122[5\n ] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (2 *\n sym_mu + sym_lambda_1 + sym_lambda_2)\n sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (\n sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *\n sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *\n sym_lambda_2 * sym_mu)\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]\n sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122\n [1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122\n [2] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122\n [3] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122\n [4] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122\n [5] / sym_state_probs_1122[4])\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122\n [6] / sym_state_probs_1122[5])\n sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122\n .copy())\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[3])\n sym_state_recursive_ratios_P0_1122 = 
sym.zeros(buffer_capacity + 1, \n        system_capacity + 1)\n    sym_state_recursive_ratios_P0_1122[0, 0] = 1\n    sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n        sym_state_probs_1122[1] / sym_state_probs_1122[0])\n    sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n        sym_state_probs_1122[2] / sym_state_probs_1122[0])\n    sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n        sym_state_probs_1122[3] / sym_state_probs_1122[0])\n    sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n        sym_state_probs_1122[4] / sym_state_probs_1122[0])\n    sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n        sym_state_probs_1122[5] / sym_state_probs_1122[0])\n    sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n        sym_state_probs_1122[6] / sym_state_probs_1122[0])\n    return (sym_state_probs_1122, sym_state_recursive_ratios_1122,\n        sym_state_recursive_ratios_right_1122,\n        sym_state_recursive_ratios_P0_1122)\n\n\ndef get_symbolic_state_probabilities_1123():\n    num_of_servers = 1\n    threshold = 1\n    system_capacity = 2\n    buffer_capacity = 3\n    Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n        threshold, system_capacity, buffer_capacity)\n    p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n        'p00, p01, p11, p21, p31, p02, p12, p22, p32')\n    pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n    dimension_1123 = Q_sym_1123.shape[0]\n    M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,\n        dimension_1123)])\n    sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n    b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n    eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n    eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n    eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n    eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n    eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n    eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n    eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n    eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n    eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n    sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,\n        eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,\n        p01, p11, p21, p31, p02, p12, p22, p32))\n    sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1, \n        system_capacity + 1)\n    sym_state_recursive_ratios_1123[0, 0] = 1\n    sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123\n        [p01] / sym_state_probs_1123[p00])\n    sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123\n        [p11] / sym_state_probs_1123[p01])\n    sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123\n        [p21] / sym_state_probs_1123[p11])\n    sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123\n        [p31] / sym_state_probs_1123[p21])\n    sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123\n        [p02] / sym_state_probs_1123[p01])\n    sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123\n        [p12] / sym_state_probs_1123[p02])\n    sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n        [p22] / sym_state_probs_1123[p12])\n    sym_state_recursive_ratios_1123[3, 2] = sym.factor(sym_state_probs_1123\n        [p32] / sym_state_probs_1123[p22])\n    sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123\n        .copy())\n    sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n        sym_state_probs_1123[p12] / 
sym_state_probs_1123[p11])\n    sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n        sym_state_probs_1123[p22] / sym_state_probs_1123[p21])\n    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n        sym_state_probs_1123[p32] / sym_state_probs_1123[p31])\n    sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1, \n        system_capacity + 1)\n    sym_state_recursive_ratios_P0_1123[0, 0] = 1\n    sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n        sym_state_probs_1123[p01] / sym_state_probs_1123[p00])\n    sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n        sym_state_probs_1123[p11] / sym_state_probs_1123[p00])\n    sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n        sym_state_probs_1123[p21] / sym_state_probs_1123[p00])\n    sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n        sym_state_probs_1123[p31] / sym_state_probs_1123[p00])\n    sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n        sym_state_probs_1123[p02] / sym_state_probs_1123[p00])\n    sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n        sym_state_probs_1123[p12] / sym_state_probs_1123[p00])\n    sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n        sym_state_probs_1123[p22] / sym_state_probs_1123[p00])\n    sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n        sym_state_probs_1123[p32] / sym_state_probs_1123[p00])\n    return (sym_state_probs_1123, sym_state_recursive_ratios_1123,\n        sym_state_recursive_ratios_right_1123,\n        sym_state_recursive_ratios_P0_1123)\n\n\ndef get_symbolic_state_probabilities_1341():\n    threshold = 3\n    system_capacity = 4\n    buffer_capacity = 1\n    all_states_1341 = abg.markov.build_states(threshold=threshold,\n        system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n    sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]\n    sym_Lambda = sym.symbols('Lambda')\n    sym_lambda_1 = sym.symbols('lambda_1')\n    sym_lambda_2 = sym.symbols('lambda_2')\n    sym_mu = sym.symbols('mu')\n    sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6\n    sym_state_probs_1341[1\n        ] = sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_mu ** 5\n    sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +\n        sym_Lambda ** 2 * sym_mu ** 4)\n    sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +\n        sym_Lambda ** 3 * sym_mu ** 3)\n    sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *\n        sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** \n        2 + sym_Lambda ** 3 * sym_lambda_2 * sym_lambda_2 * sym_mu)\n    sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2\n    sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *\n        sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +\n        2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)\n    total_1341 = np.sum(sym_state_probs_1341)\n    sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]\n    sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1, \n        system_capacity + 1)\n    sym_state_recursive_ratios_1341[0, 0] = 1\n    sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341\n        [1] / sym_state_probs_1341[0])\n    sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341\n        [2] / sym_state_probs_1341[1])\n    sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341\n        [3] / sym_state_probs_1341[2])\n    sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341\n        [5] / sym_state_probs_1341[3])\n    sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341\n        [4] / 
sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341\n [6] / sym_state_probs_1341[5])\n sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341\n .copy())\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4])\n sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0])\n return (sym_state_probs_1341, sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341)\n\n\ndef get_symbolic_state_probabilities_1131():\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n all_states_1131 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu ** \n 5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *\n sym_mu ** 4)\n sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *\n sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *\n sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 * \n sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **\n 2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 + \n sym_Lambda * sym_lambda_2 * sym_mu ** 4)\n sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1131[4\n ] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 2 * sym_mu ** 2)\n sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3\n sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 3 * sym_mu ** 2)\n denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 + \n sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *\n sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 ** \n 2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *\n sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *\n sym_Lambda * sym_lambda_2 * 
sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *\n sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **\n 2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 + \n sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 + \n sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 * \n sym_mu ** 5 + sym_mu ** 6)\n sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]\n sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131\n [1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131\n [2] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131\n [3] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131\n [4] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131\n [5] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131\n [6] / sym_state_probs_1131[5])\n sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131\n .copy())\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2])\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[4])\n sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[0])\n return (sym_state_probs_1131, sym_state_recursive_ratios_1131,\n sym_state_recursive_ratios_right_1131,\n sym_state_recursive_ratios_P0_1131)\n\n\ndef get_symbolic_state_probabilities_1132():\n num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 2\n Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(\n 'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')\n pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])\n dimension_1132 = Q_sym_1132.shape[0]\n M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,\n dimension_1132)])\n sym_diff_equations_1132 = M_sym_1132 @ pi_1132\n b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])\n eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])\n eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])\n eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])\n eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])\n eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])\n eq5_1132 = 
sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])\n eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])\n eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])\n eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])\n eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])\n sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,\n eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,\n eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))\n sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1132[0, 0] = 1\n sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132\n [p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132\n [p11] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132\n [p21] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132\n [p02] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132\n [p12] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132\n [p22] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132\n [p03] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132\n [p13] / sym_state_probs_1132[p03])\n sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132\n [p23] / sym_state_probs_1132[p13])\n sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132\n .copy())\n sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p21])\n sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p22])\n sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1132[0, 0] = 1\n sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p00])\n return (sym_state_probs_1132, sym_state_recursive_ratios_1132,\n sym_state_recursive_ratios_right_1132,\n sym_state_recursive_ratios_P0_1132)\n\n\n<mask token>\n\n\ndef 
get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15])\n dimension_1151 = Q_sym_1151.shape[0]\n M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,\n dimension_1151)])\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,\n eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,\n eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15))\n sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151\n [p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151\n [p11] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151\n [p02] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151\n [p12] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151\n [p03] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151\n [p13] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151\n [p04] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151\n [p14] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151\n [p05] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151\n [p15] / sym_state_probs_1151[p05])\n sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151\n .copy())\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11])\n sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12])\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13])\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p14])\n sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n 
sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00])\n return (sym_state_probs_1151, sym_state_recursive_ratios_1151,\n sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151)\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.\n symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))\n pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16])\n dimension_1161 = Q_sym_1161.shape[0]\n M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,\n dimension_1161)])\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])\n sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,\n eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,\n eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16))\n sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161\n [p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161\n [p11] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161\n [p02] / 
sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161\n [p12] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161\n [p03] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161\n [p13] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161\n [p04] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161\n [p14] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161\n [p05] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161\n [p15] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161\n [p06] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161\n [p16] / sym_state_probs_1161[p06])\n sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161\n .copy())\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11])\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p12])\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13])\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14])\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15])\n sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00])\n return (sym_state_probs_1161, sym_state_recursive_ratios_1161,\n sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161)\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n Q_sym_1171 = 
abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\n ) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'\n ))\n pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17])\n dimension_1171 = Q_sym_1171.shape[0]\n M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,\n dimension_1171)])\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,\n eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,\n eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,\n p07, p17))\n sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171\n [p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171\n [p11] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171\n [p02] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171\n [p12] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171\n [p03] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171\n [p13] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171\n [p04] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171\n [p14] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171\n [p05] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171\n [p15] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171\n [p06] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171\n [p16] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171\n [p07] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171\n [p17] / 
sym_state_probs_1171[p07])\n sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171\n .copy())\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11])\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12])\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13])\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14])\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15])\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16])\n sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], 
b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] 
/ sym_state_probs_1181[p12])\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), 
[1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191\n [p04] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n [p14] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n [p05] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n [p15] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n [p06] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n [p16] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n [p07] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n [p17] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n [p08] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n [p18] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[0, 9] = 
sym.factor(sym_state_probs_1191\n [p09] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n [p19] / sym_state_probs_1191[p09])\n sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n .copy())\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p14])\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p18])\n sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191)\n",
"step-4": "import ambulance_game as abg\nimport numpy as np\nimport sympy as sym\nfrom sympy.abc import a, b, c, d, e, f, g, h, i, j\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity\n ):\n Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return 'Capacity of 6 exceeded'\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n all_states_1222 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])\n sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222\n [1] / sym_state_probs_1222[0])\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222\n [2] / sym_state_probs_1222[1])\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222\n [3] / sym_state_probs_1222[2])\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222\n [4] / sym_state_probs_1222[3])\n return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 1\n all_states_1121 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_pi_1121 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n sym_state_probs_1121 = [(0) for _ in range(len(all_states_1121))]\n sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])\n sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])\n sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])\n sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])\n sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])\n sym_state_recursive_ratios_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1121[0, 0] = 1\n sym_state_recursive_ratios_1121[0, 1] = sym.factor(sym_state_probs_1121\n [1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_1121[1, 1] = sym.factor(sym_state_probs_1121\n [2] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[0, 2] = sym.factor(sym_state_probs_1121\n [3] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(sym_state_probs_1121\n [4] / sym_state_probs_1121[3])\n sym_state_recursive_ratios_right_1121 = (sym_state_recursive_ratios_1121\n .copy())\n 
sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2])\n sym_state_recursive_ratios_P0_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0])\n return (sym_state_probs_1121, sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121)\n\n\ndef get_symbolic_state_probabilities_1122():\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n all_states_1122 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +\n sym_lambda_2 ** 2 * sym_mu ** 4)\n sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *\n sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)\n sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (\n sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *\n sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *\n sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu + \n sym_lambda_1 ** 2)\n sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1122[5\n ] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (2 *\n sym_mu + sym_lambda_1 + sym_lambda_2)\n sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (\n sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *\n sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *\n sym_lambda_2 * sym_mu)\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]\n sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122\n [1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122\n [2] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122\n [3] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122\n [4] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122\n [5] / sym_state_probs_1122[4])\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122\n [6] / sym_state_probs_1122[5])\n sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122\n .copy())\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n 
sym_state_probs_1122[6] / sym_state_probs_1122[3])\n sym_state_recursive_ratios_P0_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0])\n return (sym_state_probs_1122, sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n sym_state_recursive_ratios_P0_1122)\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n 'p00, p01, p11, p21, p31, p02, p12, p22, p32')\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,\n dimension_1123)])\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,\n eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,\n p01, p11, p21, p31, p02, p12, p22, p32))\n sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123\n [p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123\n [p11] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123\n [p21] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123\n [p31] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123\n [p02] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123\n [p12] / sym_state_probs_1123[p02])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p22] / sym_state_probs_1123[p12])\n sym_state_recursive_ratios_1123[3, 2] = sym.factor(sym_state_probs_1123\n [p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123\n .copy())\n 
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1123[0, 0] = 1\n sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p00])\n return (sym_state_probs_1123, sym_state_recursive_ratios_1123,\n sym_state_recursive_ratios_right_1123,\n sym_state_recursive_ratios_P0_1123)\n\n\ndef get_symbolic_state_probabilities_1341():\n threshold = 3\n system_capacity = 4\n buffer_capacity = 1\n all_states_1341 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6\n sym_state_probs_1341[1\n ] = sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_mu ** 5\n sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +\n sym_Lambda ** 2 * sym_mu ** 4)\n sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +\n sym_Lambda ** 3 * sym_mu ** 3)\n sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *\n sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** \n 2 + sym_Lambda ** 3 * sym_lambda_2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2\n sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *\n sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +\n 2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]\n sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341\n [1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341\n [2] / sym_state_probs_1341[1])\n sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341\n [3] / sym_state_probs_1341[2])\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341\n [5] / sym_state_probs_1341[3])\n 
sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341\n [4] / sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341\n [6] / sym_state_probs_1341[5])\n sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341\n .copy())\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4])\n sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0])\n return (sym_state_probs_1341, sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341)\n\n\ndef get_symbolic_state_probabilities_1131():\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n all_states_1131 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu ** \n 5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *\n sym_mu ** 4)\n sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *\n sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *\n sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 * \n sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **\n 2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 + \n sym_Lambda * sym_lambda_2 * sym_mu ** 4)\n sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1131[4\n ] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 2 * sym_mu ** 2)\n sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3\n sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 3 * sym_mu ** 2)\n denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 + \n sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *\n sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 ** \n 2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *\n sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *\n sym_Lambda * 
sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *\n sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **\n 2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 + \n sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 + \n sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 * \n sym_mu ** 5 + sym_mu ** 6)\n sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]\n sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131\n [1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131\n [2] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131\n [3] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131\n [4] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131\n [5] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131\n [6] / sym_state_probs_1131[5])\n sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131\n .copy())\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2])\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[4])\n sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[0])\n return (sym_state_probs_1131, sym_state_recursive_ratios_1131,\n sym_state_recursive_ratios_right_1131,\n sym_state_recursive_ratios_P0_1131)\n\n\ndef get_symbolic_state_probabilities_1132():\n num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 2\n Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(\n 'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')\n pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])\n dimension_1132 = Q_sym_1132.shape[0]\n M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,\n dimension_1132)])\n sym_diff_equations_1132 = M_sym_1132 @ pi_1132\n b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])\n eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])\n eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])\n eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])\n eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])\n eq4_1132 = 
sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])\n eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])\n eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])\n eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])\n eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])\n eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])\n sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,\n eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,\n eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))\n sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1132[0, 0] = 1\n sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132\n [p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132\n [p11] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132\n [p21] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132\n [p02] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132\n [p12] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132\n [p22] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132\n [p03] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132\n [p13] / sym_state_probs_1132[p03])\n sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132\n [p23] / sym_state_probs_1132[p13])\n sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132\n .copy())\n sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p21])\n sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p22])\n sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1132[0, 0] = 1\n sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p00])\n return (sym_state_probs_1132, sym_state_recursive_ratios_1132,\n sym_state_recursive_ratios_right_1132,\n 
sym_state_recursive_ratios_P0_1132)\n\n\ndef get_symbolic_state_probabilities_1141():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 1\n Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14')\n pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])\n dimension_1141 = Q_sym_1141.shape[0]\n M_sym_1141 = sym.Matrix([Q_sym_1141.transpose()[:-1, :], sym.ones(1,\n dimension_1141)])\n sym_diff_equations_1141 = M_sym_1141 @ pi_1141\n b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])\n eq0_1141 = sym.Eq(sym_diff_equations_1141[0], b_sym_1141[0])\n eq1_1141 = sym.Eq(sym_diff_equations_1141[1], b_sym_1141[1])\n eq2_1141 = sym.Eq(sym_diff_equations_1141[2], b_sym_1141[2])\n eq3_1141 = sym.Eq(sym_diff_equations_1141[3], b_sym_1141[3])\n eq4_1141 = sym.Eq(sym_diff_equations_1141[4], b_sym_1141[4])\n eq5_1141 = sym.Eq(sym_diff_equations_1141[5], b_sym_1141[5])\n eq6_1141 = sym.Eq(sym_diff_equations_1141[6], b_sym_1141[6])\n eq7_1141 = sym.Eq(sym_diff_equations_1141[7], b_sym_1141[7])\n eq8_1141 = sym.Eq(sym_diff_equations_1141[8], b_sym_1141[8])\n sym_state_probs_1141 = sym.solve([eq0_1141, eq1_1141, eq2_1141,\n eq3_1141, eq4_1141, eq5_1141, eq6_1141, eq7_1141, eq8_1141], (p00,\n p01, p11, p02, p12, p03, p13, p04, p14))\n sym_state_recursive_ratios_1141 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1141[0, 0] = 1\n sym_state_recursive_ratios_1141[0, 1] = sym.factor(sym_state_probs_1141\n [p01] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_1141[1, 1] = sym.factor(sym_state_probs_1141\n [p11] / sym_state_probs_1141[p01])\n sym_state_recursive_ratios_1141[0, 2] = sym.factor(sym_state_probs_1141\n [p02] / sym_state_probs_1141[p01])\n sym_state_recursive_ratios_1141[1, 2] = sym.factor(sym_state_probs_1141\n [p12] / sym_state_probs_1141[p02])\n sym_state_recursive_ratios_1141[0, 3] = sym.factor(sym_state_probs_1141\n [p03] / sym_state_probs_1141[p02])\n sym_state_recursive_ratios_1141[1, 3] = sym.factor(sym_state_probs_1141\n [p13] / sym_state_probs_1141[p03])\n sym_state_recursive_ratios_1141[0, 4] = sym.factor(sym_state_probs_1141\n [p04] / sym_state_probs_1141[p03])\n sym_state_recursive_ratios_1141[1, 4] = sym.factor(sym_state_probs_1141\n [p14] / sym_state_probs_1141[p04])\n sym_state_recursive_ratios_right_1141 = (sym_state_recursive_ratios_1141\n .copy())\n sym_state_recursive_ratios_right_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p11])\n sym_state_recursive_ratios_right_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p12])\n sym_state_recursive_ratios_right_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p13])\n sym_state_recursive_ratios_P0_1141 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1141[0, 0] = 1\n sym_state_recursive_ratios_P0_1141[0, 1] = sym.factor(\n sym_state_probs_1141[p01] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[1, 1] = sym.factor(\n sym_state_probs_1141[p11] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[0, 2] = sym.factor(\n sym_state_probs_1141[p02] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[0, 
3] = sym.factor(\n sym_state_probs_1141[p03] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[0, 4] = sym.factor(\n sym_state_probs_1141[p04] / sym_state_probs_1141[p00])\n sym_state_recursive_ratios_P0_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p00])\n return (sym_state_probs_1141, sym_state_recursive_ratios_1141,\n sym_state_recursive_ratios_right_1141,\n sym_state_recursive_ratios_P0_1141)\n\n\ndef get_symbolic_state_probabilities_1142():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 2\n Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = (sym.\n symbols(\n 'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24'))\n pi_1142 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23,\n p04, p14, p24])\n dimension_1142 = Q_sym_1142.shape[0]\n M_sym_1142 = sym.Matrix([Q_sym_1142.transpose()[:-1, :], sym.ones(1,\n dimension_1142)])\n sym_diff_equations_1142 = M_sym_1142 @ pi_1142\n b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])\n eq0_1142 = sym.Eq(sym_diff_equations_1142[0], b_sym_1142[0])\n eq1_1142 = sym.Eq(sym_diff_equations_1142[1], b_sym_1142[1])\n eq2_1142 = sym.Eq(sym_diff_equations_1142[2], b_sym_1142[2])\n eq3_1142 = sym.Eq(sym_diff_equations_1142[3], b_sym_1142[3])\n eq4_1142 = sym.Eq(sym_diff_equations_1142[4], b_sym_1142[4])\n eq5_1142 = sym.Eq(sym_diff_equations_1142[5], b_sym_1142[5])\n eq6_1142 = sym.Eq(sym_diff_equations_1142[6], b_sym_1142[6])\n eq7_1142 = sym.Eq(sym_diff_equations_1142[7], b_sym_1142[7])\n eq8_1142 = sym.Eq(sym_diff_equations_1142[8], b_sym_1142[8])\n eq9_1142 = sym.Eq(sym_diff_equations_1142[9], b_sym_1142[9])\n eq10_1142 = sym.Eq(sym_diff_equations_1142[10], b_sym_1142[10])\n eq11_1142 = sym.Eq(sym_diff_equations_1142[11], b_sym_1142[11])\n eq12_1142 = sym.Eq(sym_diff_equations_1142[12], b_sym_1142[12])\n sym_state_probs_1142 = sym.solve([eq0_1142, eq1_1142, eq2_1142,\n eq3_1142, eq4_1142, eq5_1142, eq6_1142, eq7_1142, eq8_1142,\n eq9_1142, eq10_1142, eq11_1142, eq12_1142], (p00, p01, p11, p21,\n p02, p12, p22, p03, p13, p23, p04, p14, p24))\n sym_state_recursive_ratios_1142 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1142[0, 0] = 1\n sym_state_recursive_ratios_1142[0, 1] = sym.factor(sym_state_probs_1142\n [p01] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_1142[1, 1] = sym.factor(sym_state_probs_1142\n [p11] / sym_state_probs_1142[p01])\n sym_state_recursive_ratios_1142[2, 1] = sym.factor(sym_state_probs_1142\n [p21] / sym_state_probs_1142[p11])\n sym_state_recursive_ratios_1142[0, 2] = sym.factor(sym_state_probs_1142\n [p02] / sym_state_probs_1142[p01])\n sym_state_recursive_ratios_1142[1, 2] = sym.factor(sym_state_probs_1142\n [p12] / sym_state_probs_1142[p02])\n sym_state_recursive_ratios_1142[2, 2] = sym.factor(sym_state_probs_1142\n [p22] / sym_state_probs_1142[p12])\n sym_state_recursive_ratios_1142[0, 3] = sym.factor(sym_state_probs_1142\n [p03] / sym_state_probs_1142[p02])\n sym_state_recursive_ratios_1142[1, 3] = sym.factor(sym_state_probs_1142\n [p13] / sym_state_probs_1142[p03])\n sym_state_recursive_ratios_1142[2, 3] = sym.factor(sym_state_probs_1142\n [p23] / 
sym_state_probs_1142[p13])\n sym_state_recursive_ratios_1142[0, 4] = sym.factor(sym_state_probs_1142\n [p04] / sym_state_probs_1142[p03])\n sym_state_recursive_ratios_1142[1, 4] = sym.factor(sym_state_probs_1142\n [p14] / sym_state_probs_1142[p04])\n sym_state_recursive_ratios_1142[2, 4] = sym.factor(sym_state_probs_1142\n [p24] / sym_state_probs_1142[p14])\n sym_state_recursive_ratios_right_1142 = (sym_state_recursive_ratios_1142\n .copy())\n sym_state_recursive_ratios_right_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p11])\n sym_state_recursive_ratios_right_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p12])\n sym_state_recursive_ratios_right_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p13])\n sym_state_recursive_ratios_right_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p21])\n sym_state_recursive_ratios_right_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p22])\n sym_state_recursive_ratios_right_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p23])\n sym_state_recursive_ratios_P0_1142 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1142[0, 0] = 1\n sym_state_recursive_ratios_P0_1142[0, 1] = sym.factor(\n sym_state_probs_1142[p01] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[1, 1] = sym.factor(\n sym_state_probs_1142[p11] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[2, 1] = sym.factor(\n sym_state_probs_1142[p21] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[0, 2] = sym.factor(\n sym_state_probs_1142[p02] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[0, 3] = sym.factor(\n sym_state_probs_1142[p03] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[0, 4] = sym.factor(\n sym_state_probs_1142[p04] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p00])\n sym_state_recursive_ratios_P0_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p00])\n return (sym_state_probs_1142, sym_state_recursive_ratios_1142,\n sym_state_recursive_ratios_right_1142,\n sym_state_recursive_ratios_P0_1142)\n\n\ndef get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15])\n dimension_1151 = Q_sym_1151.shape[0]\n M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,\n dimension_1151)])\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n eq0_1151 = 
sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,\n eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,\n eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15))\n sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151\n [p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151\n [p11] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151\n [p02] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151\n [p12] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151\n [p03] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151\n [p13] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151\n [p04] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151\n [p14] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151\n [p05] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151\n [p15] / sym_state_probs_1151[p05])\n sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151\n .copy())\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11])\n sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12])\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13])\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p14])\n sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00])\n 
sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00])\n return (sym_state_probs_1151, sym_state_recursive_ratios_1151,\n sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151)\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.\n symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))\n pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16])\n dimension_1161 = Q_sym_1161.shape[0]\n M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,\n dimension_1161)])\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])\n sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,\n eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,\n eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16))\n sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161\n [p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161\n [p11] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161\n [p02] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161\n [p12] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161\n [p03] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161\n [p13] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161\n [p04] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161\n [p14] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161\n [p05] / sym_state_probs_1161[p04])\n 
sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161\n [p15] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161\n [p06] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161\n [p16] / sym_state_probs_1161[p06])\n sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161\n .copy())\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11])\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p12])\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13])\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14])\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15])\n sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00])\n return (sym_state_probs_1161, sym_state_recursive_ratios_1161,\n sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161)\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\n ) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'\n ))\n pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17])\n dimension_1171 = Q_sym_1171.shape[0]\n M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,\n dimension_1171)])\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = sym.Eq(sym_diff_equations_1171[1], 
b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,\n eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,\n eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,\n p07, p17))\n sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171\n [p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171\n [p11] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171\n [p02] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171\n [p12] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171\n [p03] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171\n [p13] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171\n [p04] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171\n [p14] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171\n [p05] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171\n [p15] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171\n [p06] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171\n [p16] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171\n [p07] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171\n [p17] / sym_state_probs_1171[p07])\n sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171\n .copy())\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11])\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12])\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13])\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14])\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15])\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n 
sym_state_probs_1171[p17] / sym_state_probs_1171[p16])\n sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = 
sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12])\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n 
sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n 
eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191\n [p04] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n [p14] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n [p05] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n [p15] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n [p06] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n [p16] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n [p07] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n [p17] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n [p08] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n [p18] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191\n [p09] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n [p19] / sym_state_probs_1191[p09])\n sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n .copy())\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / 
sym_state_probs_1191[p14])\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p18])\n sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191)\n",
"step-5": "import ambulance_game as abg\nimport numpy as np\nimport sympy as sym\nfrom sympy.abc import a, b, c, d, e, f, g, h, i, j\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity):\n    Q_sym = abg.markov.get_symbolic_transition_matrix(\n        num_of_servers=num_of_servers,\n        threshold=threshold,\n        system_capacity=system_capacity,\n        buffer_capacity=buffer_capacity,\n    )\n    dimension = Q_sym.shape[0]\n    if dimension > 7:\n        return \"Capacity of 6 exceeded\"\n    M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n    b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n    system = M_sym.col_insert(dimension, b_sym)\n    sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n    return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n    num_of_servers = 1\n    threshold = 2\n    system_capacity = 2\n    buffer_capacity = 2\n\n    sym_pi_1222 = get_symbolic_pi(\n        num_of_servers=num_of_servers,\n        threshold=threshold,\n        system_capacity=system_capacity,\n        buffer_capacity=buffer_capacity,\n    )\n    all_states_1222 = abg.markov.build_states(\n        threshold=threshold,\n        system_capacity=system_capacity,\n        buffer_capacity=buffer_capacity,\n    )\n\n    sym_state_probs_1222 = [0 for _ in range(len(all_states_1222))]\n    sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])  # (0,0)\n    sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])  # (0,1)\n    sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])  # (0,2)\n    sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])  # (1,2)\n    sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])  # (2,2)\n\n    sym_state_recursive_ratios_1222 = sym.zeros(\n        buffer_capacity + 1, system_capacity + 1\n    )\n    sym_state_recursive_ratios_1222[0, 0] = 1\n    sym_state_recursive_ratios_1222[0, 1] = sym.factor(\n        sym_state_probs_1222[1] / sym_state_probs_1222[0]\n    )  # (0,0) -> (0,1)\n    sym_state_recursive_ratios_1222[0, 2] = sym.factor(\n        sym_state_probs_1222[2] / sym_state_probs_1222[1]\n    )  # (0,1) -> (0,2)\n    sym_state_recursive_ratios_1222[1, 2] = sym.factor(\n        sym_state_probs_1222[3] / sym_state_probs_1222[2]\n    )  # (0,2) -> (1,2)\n    sym_state_recursive_ratios_1222[2, 2] = sym.factor(\n        sym_state_probs_1222[4] / sym_state_probs_1222[3]\n    )  # (1,2) -> (2,2)\n\n    return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n    num_of_servers = 1\n    threshold = 1\n    system_capacity = 2\n    buffer_capacity = 1\n\n    all_states_1121 = abg.markov.build_states(\n        threshold=threshold,\n        system_capacity=system_capacity,\n        buffer_capacity=buffer_capacity,\n    )\n    sym_pi_1121 = get_symbolic_pi(\n        num_of_servers=num_of_servers,\n        threshold=threshold,\n        system_capacity=system_capacity,\n        buffer_capacity=buffer_capacity,\n    )\n    sym_state_probs_1121 = [0 for _ in range(len(all_states_1121))]\n\n    sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])  # (0,0)\n    sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])  # (0,1)\n    sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])  # (1,1)\n    sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])  # (0,2)\n    sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])  # (1,2)\n\n    sym_state_recursive_ratios_1121 = sym.zeros(\n        buffer_capacity + 1, system_capacity + 1\n    )\n    sym_state_recursive_ratios_1121[0, 0] = 1\n    sym_state_recursive_ratios_1121[0, 1] = sym.factor(\n        sym_state_probs_1121[1] / sym_state_probs_1121[0]\n    )  # (0,0) -> (0,1)\n    sym_state_recursive_ratios_1121[1, 1] = sym.factor(\n        sym_state_probs_1121[2] / sym_state_probs_1121[1]\n    )  # (0,1) -> (1,1)\n    sym_state_recursive_ratios_1121[0, 2] = sym.factor(\n
sym_state_probs_1121[3] / sym_state_probs_1121[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[3]\n ) # (0,2) -> (1,2)\n\n sym_state_recursive_ratios_right_1121 = sym_state_recursive_ratios_1121.copy()\n sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2]\n ) # (1,1) -> (1,2)\n\n sym_state_recursive_ratios_P0_1121 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0]\n ) # (0,0) -> (1,2)\n\n return (\n sym_state_probs_1121,\n sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121,\n )\n\n\ndef get_symbolic_state_probabilities_1122():\n # num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n\n all_states_1122 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1122 = [0 for _ in range(len(all_states_1122))]\n\n sym_Lambda = sym.symbols(\"Lambda\")\n sym_lambda_1 = sym.symbols(\"lambda_1\")\n sym_lambda_2 = sym.symbols(\"lambda_2\")\n sym_mu = sym.symbols(\"mu\")\n\n sym_state_probs_1122[0] = (\n (sym_mu**6)\n + 2 * (sym_lambda_2) * (sym_mu**5)\n + (sym_lambda_2**2) * (sym_mu**4)\n ) # (0,0)\n sym_state_probs_1122[1] = (sym_Lambda * sym_mu**3) * (\n sym_mu**2 + 2 * sym_mu * sym_lambda_2 + sym_lambda_2**2\n ) # (0,1)\n sym_state_probs_1122[2] = (sym_Lambda * sym_lambda_2 * sym_mu**2) * (\n sym_lambda_2**2\n + sym_lambda_2 * sym_lambda_1\n + sym_lambda_1 * sym_mu\n + sym_mu**2\n + 2 * sym_lambda_2 * sym_mu\n ) # (1,1)\n sym_state_probs_1122[3] = (sym_Lambda * sym_lambda_2**2 * sym_mu) * (\n sym_lambda_2**2\n + 2 * sym_lambda_1 * sym_lambda_2\n + 3 * sym_lambda_1 * sym_mu\n + sym_mu**2\n + 2 * sym_lambda_2 * sym_mu\n + sym_lambda_1**2\n ) # (2,1)\n sym_state_probs_1122[4] = (sym_Lambda * sym_lambda_1 * sym_mu**3) * (\n sym_lambda_2 + sym_mu\n ) # (0,2)\n sym_state_probs_1122[5] = (\n sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu**2\n ) * (\n 2 * sym_mu + sym_lambda_1 + sym_lambda_2\n ) # (1,2)\n sym_state_probs_1122[6] = (sym_Lambda * sym_lambda_1 * sym_lambda_2**2) * (\n sym_lambda_1**2\n + 4 * sym_lambda_1 * sym_mu\n + 2 * sym_lambda_1 * sym_lambda_2\n + 3 * sym_mu**2\n + sym_lambda_2**2\n + 3 * sym_lambda_2 * sym_mu\n ) # (2,2)\n\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [i / total_1122 for i in sym_state_probs_1122]\n\n sym_state_recursive_ratios_1122 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[2]\n ) # 
(1,1) -> (2,1)\n\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[4]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[5]\n ) # (1,2) -> (2,2)\n\n sym_state_recursive_ratios_right_1122 = sym_state_recursive_ratios_1122.copy()\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[3]\n ) # (2,1) -> (2,2)\n\n sym_state_recursive_ratios_P0_1122 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0]\n ) # (0,0) -> (2,1)\n\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0]\n ) # (0,0) -> (2,2)\n\n return (\n sym_state_probs_1122,\n sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n sym_state_recursive_ratios_P0_1122,\n )\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n \"p00, p01, p11, p21, p31, p02, p12, p22, p32\"\n )\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n\n M_sym_1123 = sym.Matrix(\n [Q_sym_1123.transpose()[:-1, :], sym.ones(1, dimension_1123)]\n )\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n\n sym_state_probs_1123 = sym.solve(\n [\n eq0_1123,\n eq1_1123,\n eq2_1123,\n eq3_1123,\n eq4_1123,\n eq5_1123,\n eq6_1123,\n eq7_1123,\n eq8_1123,\n ],\n (p00, p01, p11, p21, p31, p02, p12, p22, p32),\n )\n\n sym_state_recursive_ratios_1123 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / 
sym_state_probs_1123[p00]\n    )  # (0,0) -> (0,1)\n    sym_state_recursive_ratios_1123[1, 1] = sym.factor(\n        sym_state_probs_1123[p11] / sym_state_probs_1123[p01]\n    )  # (0,1) -> (1,1)\n    sym_state_recursive_ratios_1123[2, 1] = sym.factor(\n        sym_state_probs_1123[p21] / sym_state_probs_1123[p11]\n    )  # (1,1) -> (2,1)\n    sym_state_recursive_ratios_1123[3, 1] = sym.factor(\n        sym_state_probs_1123[p31] / sym_state_probs_1123[p21]\n    )  # (2,1) -> (3,1)\n    sym_state_recursive_ratios_1123[0, 2] = sym.factor(\n        sym_state_probs_1123[p02] / sym_state_probs_1123[p01]\n    )  # (0,1) -> (0,2)\n    sym_state_recursive_ratios_1123[1, 2] = sym.factor(\n        sym_state_probs_1123[p12] / sym_state_probs_1123[p02]\n    )  # (0,2) -> (1,2)\n    sym_state_recursive_ratios_1123[2, 2] = sym.factor(\n        sym_state_probs_1123[p22] / sym_state_probs_1123[p12]\n    )  # (1,2) -> (2,2)\n    sym_state_recursive_ratios_1123[3, 2] = sym.factor(\n        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]\n    )  # (2,2) -> (3,2)\n\n    sym_state_recursive_ratios_right_1123 = sym_state_recursive_ratios_1123.copy()\n    sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n        sym_state_probs_1123[p12] / sym_state_probs_1123[p11]\n    )  # (1,1) -> (1,2)\n    sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n        sym_state_probs_1123[p22] / sym_state_probs_1123[p21]\n    )  # (2,1) -> (2,2)\n    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]\n    )  # (2,2) -> (3,2)\n\n    sym_state_recursive_ratios_P0_1123 = sym.zeros(\n        buffer_capacity + 1, system_capacity + 1\n    )\n    sym_state_recursive_ratios_P0_1123[0, 0] = 1\n    sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n        sym_state_probs_1123[p01] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (0,1)\n    sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n        sym_state_probs_1123[p11] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (1,1)\n    sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n        sym_state_probs_1123[p21] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (2,1)\n    sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n        sym_state_probs_1123[p31] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (3,1)\n    sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n        sym_state_probs_1123[p02] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (0,2)\n    sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n        sym_state_probs_1123[p12] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (1,2)\n    sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n        sym_state_probs_1123[p22] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (2,2)\n    sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n        sym_state_probs_1123[p32] / sym_state_probs_1123[p00]\n    )  # (0,0) -> (3,2)\n\n    return (\n        sym_state_probs_1123,\n        sym_state_recursive_ratios_1123,\n        sym_state_recursive_ratios_right_1123,\n        sym_state_recursive_ratios_P0_1123,\n    )\n\n\ndef get_symbolic_state_probabilities_1341():\n    # num_of_servers = 1\n    threshold = 3\n    system_capacity = 4\n    buffer_capacity = 1\n\n    all_states_1341 = abg.markov.build_states(\n        threshold=threshold,\n        system_capacity=system_capacity,\n        buffer_capacity=buffer_capacity,\n    )\n    sym_state_probs_1341 = [0 for _ in range(len(all_states_1341))]\n\n    sym_Lambda = sym.symbols(\"Lambda\")\n    sym_lambda_1 = sym.symbols(\"lambda_1\")\n    sym_lambda_2 = sym.symbols(\"lambda_2\")\n    sym_mu = sym.symbols(\"mu\")\n\n    sym_state_probs_1341[0] = (sym_lambda_2) * (sym_mu**5) + (sym_mu**6)  # (0,0)\n    sym_state_probs_1341[1] = sym_Lambda * sym_lambda_2 * (sym_mu**4) + sym_Lambda * (\n        sym_mu**5\n    )  # (0,1)\n    sym_state_probs_1341[2] =
(sym_Lambda**2) * sym_lambda_2 * (sym_mu**3) + (\n sym_Lambda**2\n ) * (\n sym_mu**4\n ) # (0,2)\n sym_state_probs_1341[3] = (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2) + (\n sym_Lambda**3\n ) * (\n sym_mu**3\n ) # (0,3)\n sym_state_probs_1341[4] = (\n (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu\n + (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2)\n + (sym_Lambda**3) * sym_lambda_2 * sym_lambda_2 * sym_mu\n ) # (1,3)\n sym_state_probs_1341[5] = (sym_Lambda**3) * sym_lambda_1 * (sym_mu**2) # (0,4)\n sym_state_probs_1341[6] = (\n (sym_Lambda**3) * (sym_lambda_1**2) * sym_lambda_2\n + (sym_Lambda**3) * sym_lambda_1 * (sym_lambda_2**2)\n + 2 * (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu\n ) # (1,4)\n\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [i / total_1341 for i in sym_state_probs_1341]\n\n sym_state_recursive_ratios_1341 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[2]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[3]\n ) # (0,3) -> (0,4)\n\n sym_state_recursive_ratios_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[3]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[5]\n ) # (0,4) -> (1,4)\n\n sym_state_recursive_ratios_right_1341 = sym_state_recursive_ratios_1341.copy()\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4]\n ) # (1,3) -> (1,4)\n\n sym_state_recursive_ratios_P0_1341 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,3)\n\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0]\n ) # (0,0) -> (1,4)\n\n return (\n sym_state_probs_1341,\n sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341,\n )\n\n\ndef get_symbolic_state_probabilities_1131():\n # num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n\n all_states_1131 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1131 = [0 for _ in range(len(all_states_1131))]\n\n sym_Lambda = sym.symbols(\"Lambda\")\n sym_lambda_1 = sym.symbols(\"lambda_1\")\n sym_lambda_2 = sym.symbols(\"lambda_2\")\n sym_mu = sym.symbols(\"mu\")\n\n # 
(0,0)\n sym_state_probs_1131[0] = (\n (sym_mu**6)\n + 2 * (sym_lambda_2 * (sym_mu**5))\n + ((sym_lambda_2**2) * (sym_mu**4))\n + (sym_lambda_1 * sym_lambda_2 * (sym_mu**4))\n )\n # (0,1)\n sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n # (1,1)\n sym_state_probs_1131[2] = (\n (sym_Lambda * (sym_lambda_1**2) * sym_lambda_2 * (sym_mu**2))\n + (sym_Lambda * sym_lambda_2 * sym_lambda_1 * (sym_mu**3))\n + 2 * (sym_Lambda * sym_lambda_1 * (sym_lambda_2**2) * (sym_mu**2))\n + 2 * (sym_Lambda * (sym_lambda_2**2) * (sym_mu**3))\n + (sym_Lambda * (sym_lambda_2**3) * (sym_mu**2))\n + (sym_Lambda * sym_lambda_2 * (sym_mu**4))\n )\n # (0,2)\n sym_state_probs_1131[3] = (\n sym_Lambda * sym_lambda_1 * sym_mu**3 * (sym_lambda_2 + sym_mu)\n )\n # (1,2)\n sym_state_probs_1131[4] = (sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu) * (\n (sym_lambda_2**2)\n + 2 * sym_lambda_2 * sym_lambda_1\n + 3 * sym_lambda_2 * sym_mu\n + (sym_lambda_1**2)\n + 2 * sym_lambda_1 * sym_mu\n + 2 * (sym_mu**2)\n )\n # (0,3)\n sym_state_probs_1131[5] = sym_Lambda * (sym_lambda_1**2) * (sym_mu**3)\n # (1,3)\n sym_state_probs_1131[6] = (sym_Lambda * sym_lambda_2 * (sym_lambda_1**2)) * (\n (sym_lambda_2**2)\n + 2 * sym_lambda_2 * sym_lambda_1\n + 3 * sym_lambda_2 * sym_mu\n + (sym_lambda_1**2)\n + 2 * sym_lambda_1 * sym_mu\n + 3 * (sym_mu**2)\n )\n\n denominator = (\n sym_Lambda * sym_lambda_2**3 * sym_lambda_1**2\n + sym_Lambda * sym_lambda_2**3 * sym_lambda_1 * sym_mu\n + sym_Lambda * sym_lambda_2**3 * sym_mu**2\n + 2 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**3\n + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**2 * sym_mu\n + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1 * sym_mu**2\n + 3 * sym_Lambda * sym_lambda_2**2 * sym_mu**3\n + sym_Lambda * sym_lambda_2 * sym_lambda_1**4\n + 3 * sym_Lambda * sym_lambda_2 * sym_lambda_1**3 * sym_mu\n + 6 * sym_Lambda * sym_lambda_2 * sym_lambda_1**2 * sym_mu**2\n + 5 * sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu**3\n + 3 * sym_Lambda * sym_lambda_2 * sym_mu**4\n + sym_Lambda * sym_lambda_1**2 * sym_mu**3\n + sym_Lambda * sym_lambda_1 * sym_mu**4\n + sym_Lambda * sym_mu**5\n + sym_lambda_2**2 * sym_mu**4\n + sym_lambda_2 * sym_lambda_1 * sym_mu**4\n + 2 * sym_lambda_2 * sym_mu**5\n + sym_mu**6\n )\n\n sym_state_probs_1131 = [i / denominator for i in sym_state_probs_1131]\n\n sym_state_recursive_ratios_1131 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[3]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[3]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[5]\n ) # (0,3) -> (1,3)\n\n sym_state_recursive_ratios_right_1131 = sym_state_recursive_ratios_1131.copy()\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n 
sym_state_probs_1131[6] / sym_state_probs_1131[4]\n ) # (1,2) -> (1,3)\n\n sym_state_recursive_ratios_P0_1131 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,3)\n\n return (\n sym_state_probs_1131,\n sym_state_recursive_ratios_1131,\n sym_state_recursive_ratios_right_1131,\n sym_state_recursive_ratios_P0_1131,\n )\n\n\ndef get_symbolic_state_probabilities_1132():\n num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 2\n\n Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(\n \"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23\"\n )\n pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])\n dimension_1132 = Q_sym_1132.shape[0]\n\n M_sym_1132 = sym.Matrix(\n [Q_sym_1132.transpose()[:-1, :], sym.ones(1, dimension_1132)]\n )\n sym_diff_equations_1132 = M_sym_1132 @ pi_1132\n\n b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])\n\n eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])\n eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])\n eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])\n eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])\n eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])\n eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])\n eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])\n eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])\n eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])\n eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])\n\n sym_state_probs_1132 = sym.solve(\n [\n eq0_1132,\n eq1_1132,\n eq2_1132,\n eq3_1132,\n eq4_1132,\n eq5_1132,\n eq6_1132,\n eq7_1132,\n eq8_1132,\n eq9_1132,\n ],\n (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23),\n )\n\n sym_state_recursive_ratios_1132 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1132[0, 0] = 1\n sym_state_recursive_ratios_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p11]\n ) # (1,1) -> (2,1)\n sym_state_recursive_ratios_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1132[2, 
2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p12]\n ) # (1,2) -> (2,2)\n sym_state_recursive_ratios_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p13]\n ) # (1,3) -> (2,3)\n\n sym_state_recursive_ratios_right_1132 = sym_state_recursive_ratios_1132.copy()\n sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p21]\n ) # (2,1) -> (2,2)\n sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p22]\n ) # (2,2) -> (2,3)\n\n sym_state_recursive_ratios_P0_1132 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1132[0, 0] = 1\n sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,1)\n sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,2)\n sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,3)\n\n return (\n sym_state_probs_1132,\n sym_state_recursive_ratios_1132,\n sym_state_recursive_ratios_right_1132,\n sym_state_recursive_ratios_P0_1132,\n )\n\n\ndef get_symbolic_state_probabilities_1141():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 1\n\n Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14\"\n )\n pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])\n dimension_1141 = Q_sym_1141.shape[0]\n\n M_sym_1141 = sym.Matrix(\n [Q_sym_1141.transpose()[:-1, :], sym.ones(1, dimension_1141)]\n )\n sym_diff_equations_1141 = M_sym_1141 @ pi_1141\n\n b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])\n\n eq0_1141 = sym.Eq(sym_diff_equations_1141[0], b_sym_1141[0])\n eq1_1141 = sym.Eq(sym_diff_equations_1141[1], b_sym_1141[1])\n eq2_1141 = sym.Eq(sym_diff_equations_1141[2], b_sym_1141[2])\n eq3_1141 = 
sym.Eq(sym_diff_equations_1141[3], b_sym_1141[3])\n eq4_1141 = sym.Eq(sym_diff_equations_1141[4], b_sym_1141[4])\n eq5_1141 = sym.Eq(sym_diff_equations_1141[5], b_sym_1141[5])\n eq6_1141 = sym.Eq(sym_diff_equations_1141[6], b_sym_1141[6])\n eq7_1141 = sym.Eq(sym_diff_equations_1141[7], b_sym_1141[7])\n eq8_1141 = sym.Eq(sym_diff_equations_1141[8], b_sym_1141[8])\n\n sym_state_probs_1141 = sym.solve(\n [\n eq0_1141,\n eq1_1141,\n eq2_1141,\n eq3_1141,\n eq4_1141,\n eq5_1141,\n eq6_1141,\n eq7_1141,\n eq8_1141,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14),\n )\n\n sym_state_recursive_ratios_1141 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1141[0, 0] = 1\n sym_state_recursive_ratios_1141[0, 1] = sym.factor(\n sym_state_probs_1141[p01] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1141[1, 1] = sym.factor(\n sym_state_probs_1141[p11] / sym_state_probs_1141[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1141[0, 2] = sym.factor(\n sym_state_probs_1141[p02] / sym_state_probs_1141[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1141[0, 3] = sym.factor(\n sym_state_probs_1141[p03] / sym_state_probs_1141[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1141[0, 4] = sym.factor(\n sym_state_probs_1141[p04] / sym_state_probs_1141[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p04]\n ) # (0,4) -> (1,4)\n\n sym_state_recursive_ratios_right_1141 = sym_state_recursive_ratios_1141.copy()\n sym_state_recursive_ratios_right_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p13]\n ) # (1,3) -> (1,4)\n\n sym_state_recursive_ratios_P0_1141 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1141[0, 0] = 1\n sym_state_recursive_ratios_P0_1141[0, 1] = sym.factor(\n sym_state_probs_1141[p01] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1141[1, 1] = sym.factor(\n sym_state_probs_1141[p11] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1141[0, 2] = sym.factor(\n sym_state_probs_1141[p02] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1141[0, 3] = sym.factor(\n sym_state_probs_1141[p03] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1141[0, 4] = sym.factor(\n sym_state_probs_1141[p04] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,4)\n\n return (\n sym_state_probs_1141,\n sym_state_recursive_ratios_1141,\n 
sym_state_recursive_ratios_right_1141,\n sym_state_recursive_ratios_P0_1141,\n )\n\n\ndef get_symbolic_state_probabilities_1142():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 2\n\n Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = sym.symbols(\n \"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24\"\n )\n pi_1142 = sym.Matrix(\n [p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24]\n )\n dimension_1142 = Q_sym_1142.shape[0]\n\n M_sym_1142 = sym.Matrix(\n [Q_sym_1142.transpose()[:-1, :], sym.ones(1, dimension_1142)]\n )\n sym_diff_equations_1142 = M_sym_1142 @ pi_1142\n\n b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])\n\n eq0_1142 = sym.Eq(sym_diff_equations_1142[0], b_sym_1142[0])\n eq1_1142 = sym.Eq(sym_diff_equations_1142[1], b_sym_1142[1])\n eq2_1142 = sym.Eq(sym_diff_equations_1142[2], b_sym_1142[2])\n eq3_1142 = sym.Eq(sym_diff_equations_1142[3], b_sym_1142[3])\n eq4_1142 = sym.Eq(sym_diff_equations_1142[4], b_sym_1142[4])\n eq5_1142 = sym.Eq(sym_diff_equations_1142[5], b_sym_1142[5])\n eq6_1142 = sym.Eq(sym_diff_equations_1142[6], b_sym_1142[6])\n eq7_1142 = sym.Eq(sym_diff_equations_1142[7], b_sym_1142[7])\n eq8_1142 = sym.Eq(sym_diff_equations_1142[8], b_sym_1142[8])\n eq9_1142 = sym.Eq(sym_diff_equations_1142[9], b_sym_1142[9])\n eq10_1142 = sym.Eq(sym_diff_equations_1142[10], b_sym_1142[10])\n eq11_1142 = sym.Eq(sym_diff_equations_1142[11], b_sym_1142[11])\n eq12_1142 = sym.Eq(sym_diff_equations_1142[12], b_sym_1142[12])\n\n sym_state_probs_1142 = sym.solve(\n [\n eq0_1142,\n eq1_1142,\n eq2_1142,\n eq3_1142,\n eq4_1142,\n eq5_1142,\n eq6_1142,\n eq7_1142,\n eq8_1142,\n eq9_1142,\n eq10_1142,\n eq11_1142,\n eq12_1142,\n ],\n (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24),\n )\n\n sym_state_recursive_ratios_1142 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1142[0, 0] = 1\n sym_state_recursive_ratios_1142[0, 1] = sym.factor(\n sym_state_probs_1142[p01] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1142[1, 1] = sym.factor(\n sym_state_probs_1142[p11] / sym_state_probs_1142[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1142[2, 1] = sym.factor(\n sym_state_probs_1142[p21] / sym_state_probs_1142[p11]\n ) # (1,1) -> (2,1)\n sym_state_recursive_ratios_1142[0, 2] = sym.factor(\n sym_state_probs_1142[p02] / sym_state_probs_1142[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p12]\n ) # (1,2) -> (2,2)\n sym_state_recursive_ratios_1142[0, 3] = sym.factor(\n sym_state_probs_1142[p03] / sym_state_probs_1142[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p13]\n ) # (1,3) -> (2,3)\n sym_state_recursive_ratios_1142[0, 4] = sym.factor(\n sym_state_probs_1142[p04] / sym_state_probs_1142[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / 
sym_state_probs_1142[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p14]\n ) # (1,4) -> (2,4)\n\n sym_state_recursive_ratios_right_1142 = sym_state_recursive_ratios_1142.copy()\n sym_state_recursive_ratios_right_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p21]\n ) # (2,1) -> (2,2)\n sym_state_recursive_ratios_right_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p22]\n ) # (2,2) -> (2,3)\n sym_state_recursive_ratios_right_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p23]\n ) # (2,3) -> (2,4)\n\n sym_state_recursive_ratios_P0_1142 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1142[0, 0] = 1\n sym_state_recursive_ratios_P0_1142[0, 1] = sym.factor(\n sym_state_probs_1142[p01] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1142[1, 1] = sym.factor(\n sym_state_probs_1142[p11] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1142[2, 1] = sym.factor(\n sym_state_probs_1142[p21] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,1)\n\n sym_state_recursive_ratios_P0_1142[0, 2] = sym.factor(\n sym_state_probs_1142[p02] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,2)\n\n sym_state_recursive_ratios_P0_1142[0, 3] = sym.factor(\n sym_state_probs_1142[p03] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,3)\n\n sym_state_recursive_ratios_P0_1142[0, 4] = sym.factor(\n sym_state_probs_1142[p04] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,4)\n\n return (\n sym_state_probs_1142,\n sym_state_recursive_ratios_1142,\n sym_state_recursive_ratios_right_1142,\n sym_state_recursive_ratios_P0_1142,\n )\n\n\ndef get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15\"\n )\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15])\n dimension_1151 = Q_sym_1151.shape[0]\n\n M_sym_1151 = sym.Matrix(\n 
[Q_sym_1151.transpose()[:-1, :], sym.ones(1, dimension_1151)]\n )\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n\n eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n\n sym_state_probs_1151 = sym.solve(\n [\n eq0_1151,\n eq1_1151,\n eq2_1151,\n eq3_1151,\n eq4_1151,\n eq5_1151,\n eq6_1151,\n eq7_1151,\n eq8_1151,\n eq9_1151,\n eq10_1151,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15),\n )\n\n sym_state_recursive_ratios_1151 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p05]\n ) # (0,5) -> (1,5)\n\n sym_state_recursive_ratios_right_1151 = sym_state_recursive_ratios_1151.copy()\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p14]\n ) # (1,4) -> (1,5)\n\n sym_state_recursive_ratios_P0_1151 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n 
sym_state_probs_1151[p11] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,5)\n\n return (\n sym_state_probs_1151,\n sym_state_recursive_ratios_1151,\n sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151,\n )\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16\"\n )\n pi_1161 = sym.Matrix(\n [p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16]\n )\n dimension_1161 = Q_sym_1161.shape[0]\n\n M_sym_1161 = sym.Matrix(\n [Q_sym_1161.transpose()[:-1, :], sym.ones(1, dimension_1161)]\n )\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])\n\n sym_state_probs_1161 = sym.solve(\n [\n eq0_1161,\n eq1_1161,\n eq2_1161,\n eq3_1161,\n eq4_1161,\n eq5_1161,\n eq6_1161,\n eq7_1161,\n eq8_1161,\n eq9_1161,\n eq10_1161,\n eq11_1161,\n eq12_1161,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16),\n )\n\n sym_state_recursive_ratios_1161 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / 
sym_state_probs_1161[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p06]\n ) # (0,6) -> (1,6)\n\n sym_state_recursive_ratios_right_1161 = sym_state_recursive_ratios_1161.copy()\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15]\n ) # (1,5) -> (1,6)\n\n sym_state_recursive_ratios_P0_1161 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n 
sym_state_probs_1161[p15] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,6)\n\n return (\n sym_state_probs_1161,\n sym_state_recursive_ratios_1161,\n sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161,\n )\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n\n Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\"\n )\n pi_1171 = sym.Matrix(\n [p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17]\n )\n dimension_1171 = Q_sym_1171.shape[0]\n\n M_sym_1171 = sym.Matrix(\n [Q_sym_1171.transpose()[:-1, :], sym.ones(1, dimension_1171)]\n )\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n\n sym_state_probs_1171 = sym.solve(\n [\n eq0_1171,\n eq1_1171,\n eq2_1171,\n eq3_1171,\n eq4_1171,\n eq5_1171,\n eq6_1171,\n eq7_1171,\n eq8_1171,\n eq9_1171,\n eq10_1171,\n eq11_1171,\n eq12_1171,\n eq13_1171,\n eq14_1171,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17),\n )\n\n sym_state_recursive_ratios_1171 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p03]\n ) # (0,3) -> (1,3)\n 
sym_state_recursive_ratios_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p07]\n ) # (0,7) -> (1,7)\n\n sym_state_recursive_ratios_right_1171 = sym_state_recursive_ratios_1171.copy()\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15]\n ) # (1,5) -> (1,6)\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16]\n ) # (1,6) -> (1,7)\n\n sym_state_recursive_ratios_P0_1171 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00]\n ) 
# (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,6)\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,7)\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,7)\n\n return (\n sym_state_probs_1171,\n sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171,\n )\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18\"\n )\n pi_1181 = sym.Matrix(\n [\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ]\n )\n dimension_1181 = Q_sym_1181.shape[0]\n\n M_sym_1181 = sym.Matrix(\n [Q_sym_1181.transpose()[:-1, :], sym.ones(1, dimension_1181)]\n )\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n\n sym_state_probs_1181 = sym.solve(\n [\n eq0_1181,\n eq1_1181,\n eq2_1181,\n eq3_1181,\n eq4_1181,\n eq5_1181,\n eq6_1181,\n eq7_1181,\n eq8_1181,\n eq9_1181,\n eq10_1181,\n eq11_1181,\n eq12_1181,\n eq13_1181,\n eq14_1181,\n eq15_1181,\n eq16_1181,\n ],\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ),\n )\n\n sym_state_recursive_ratios_1181 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(\n 
sym_state_probs_1181[p12] / sym_state_probs_1181[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p07]\n ) # (0,7) -> (1,7)\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p07]\n ) # (0,7) -> (0,8)\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p08]\n ) # (0,8) -> (1,8)\n\n sym_state_recursive_ratios_right_1181 = sym_state_recursive_ratios_1181.copy()\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15]\n ) # (1,5) -> (1,6)\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16]\n ) # (1,6) -> (1,7)\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17]\n ) # (1,7) -> (1,8)\n\n sym_state_recursive_ratios_P0_1181 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1181[1, 3] = 
sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,6)\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,7)\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,7)\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,8)\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,8)\n\n return (\n sym_state_probs_1181,\n sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181,\n )\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19\"\n )\n pi_1191 = sym.Matrix(\n [\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ]\n )\n dimension_1191 = Q_sym_1191.shape[0]\n\n M_sym_1191 = sym.Matrix(\n [Q_sym_1191.transpose()[:-1, :], sym.ones(1, dimension_1191)]\n )\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], 
b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n\n sym_state_probs_1191 = sym.solve(\n [\n eq0_1191,\n eq1_1191,\n eq2_1191,\n eq3_1191,\n eq4_1191,\n eq5_1191,\n eq6_1191,\n eq7_1191,\n eq8_1191,\n eq9_1191,\n eq10_1191,\n eq11_1191,\n eq12_1191,\n eq13_1191,\n eq14_1191,\n eq15_1191,\n eq16_1191,\n eq17_1191,\n eq18_1191,\n ],\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ),\n )\n\n sym_state_recursive_ratios_1191 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p07]\n ) # (0,7) -> (1,7)\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p07]\n ) # (0,7) -> (0,8)\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p08]\n ) # (0,8) -> (1,8)\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p08]\n ) # (0,8) -> (0,9)\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p09]\n ) # (0,9) -> (1,9)\n\n sym_state_recursive_ratios_right_1191 = sym_state_recursive_ratios_1191.copy()\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1191[1, 4] = 
sym.factor(\n        sym_state_probs_1191[p14] / sym_state_probs_1191[p13]\n    )  # (1,3) -> (1,4)\n    sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n        sym_state_probs_1191[p15] / sym_state_probs_1191[p14]\n    )  # (1,4) -> (1,5)\n    sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n        sym_state_probs_1191[p16] / sym_state_probs_1191[p15]\n    )  # (1,5) -> (1,6)\n    sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n        sym_state_probs_1191[p17] / sym_state_probs_1191[p16]\n    )  # (1,6) -> (1,7)\n    sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n        sym_state_probs_1191[p18] / sym_state_probs_1191[p17]\n    )  # (1,7) -> (1,8)\n    sym_state_recursive_ratios_right_1191[1, 9] = sym.factor(\n        sym_state_probs_1191[p19] / sym_state_probs_1191[p18]\n    )  # (1,8) -> (1,9)\n\n    sym_state_recursive_ratios_P0_1191 = sym.zeros(\n        buffer_capacity + 1, system_capacity + 1\n    )\n    sym_state_recursive_ratios_P0_1191[0, 0] = 1\n    sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n        sym_state_probs_1191[p01] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,1)\n    sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n        sym_state_probs_1191[p11] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,1)\n    sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n        sym_state_probs_1191[p02] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,2)\n    sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n        sym_state_probs_1191[p12] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,2)\n    sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n        sym_state_probs_1191[p03] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,3)\n    sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n        sym_state_probs_1191[p13] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,3)\n    sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n        sym_state_probs_1191[p04] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,4)\n    sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n        sym_state_probs_1191[p14] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,4)\n    sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n        sym_state_probs_1191[p05] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,5)\n    sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n        sym_state_probs_1191[p15] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,5)\n    sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n        sym_state_probs_1191[p06] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,6)\n    sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n        sym_state_probs_1191[p16] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,6)\n    sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n        sym_state_probs_1191[p07] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,7)\n    sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n        sym_state_probs_1191[p17] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,7)\n    sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n        sym_state_probs_1191[p08] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,8)\n    sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n        sym_state_probs_1191[p18] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,8)\n    sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n        sym_state_probs_1191[p09] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (0,9)\n    sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n        sym_state_probs_1191[p19] / sym_state_probs_1191[p00]\n    )  # (0,0) -> (1,9)\n\n    return (\n        sym_state_probs_1191,\n        sym_state_recursive_ratios_1191,\n        sym_state_recursive_ratios_right_1191,\n        sym_state_recursive_ratios_P0_1191,\n    )\n",
"step-ids": [
5,
12,
13,
16,
17
]
}
|
[
5,
12,
13,
16,
17
] |
'''
we have source files with a certain format and each file has 200 columns, and there is a process that takes the source
files, loads them into hbase, and moves them into a sql data warehouse. We have to create automated test scripts that
compare what is in the source files with what is in hbase and the sql data warehouse: load into hbase, query the flat
file, query hbase, and compare each row.
https://community.hortonworks.com/articles/4942/import-csv-data-into-hbase-using-importtsv.html
https://www.briandunning.com/sample-data/
http://python-phoenixdb.readthedocs.io/en/latest/
https://phoenix.apache.org/faq.html
https://phoenix.apache.org/bulk_dataload.html
hbase shell
create 'CUSTOMERS', 'cf'
count 'CUSTOMERS'
scan 'CUSTOMERS'
exit
hdfs dfs -put customers-with-out-header-500.csv
hbase org.apache.hadoop.hbase.mapreduce.ImportTsv '-Dimporttsv.separator=|' -Dimporttsv.columns="HBASE_ROW_KEY,cf:first_name,cf:last_name,cf:company_name,cf:address,cf:city,cf:county,cf:state,cf:zip,cf:phone1,cf:phone2,cf:email,cf:web" CUSTOMERS customers-with-out-header-500.csv
sudo python3 -m pip install happybase
sudo python3 -m pip install pandas
sudo python3 -m pip install numpy
sudo python3 -m pip install ipython
list of hbase tables [b'customers']
len of hbase keys 501
hbase columns [b'cf:state', b'cf:phone2', b'cf:email', b'cf:zip', b'cf:last_name', b'cf:address', b'cf:city', b'cf:company_name', b'cf:phone1', b'cf:county', b'cf:first_name', b'cf:web']
hbase columns len 12
csv file shape (500, 13)
csv columns ['index', 'first_name', 'last_name', 'company_name', 'address', 'city', 'county', 'state', 'zip', 'phone1', 'phone2', 'email', 'web']
phoenix steps
python /usr/lib/phoenix/bin/sqlline.py
CREATE TABLE "CUSTOMERSPHOENIX" (pk VARCHAR PRIMARY KEY, first_name VARCHAR, last_name VARCHAR, company_name VARCHAR, address VARCHAR, city VARCHAR, county VARCHAR, state VARCHAR, zip VARCHAR, phone1 VARCHAR, phone2 VARCHAR, email VARCHAR, web VARCHAR)
python /usr/lib/phoenix/bin/psql.py -t CUSTOMERSPHOENIX -d "|" localhost customers-with-out-header-500.csv
SELECT A.*, B.* FROM CUSTOMERS AS A FULL JOIN CUSTOMERSPHOENIX AS B ON (A.PK = B.PK) WHERE A.PK IS NULL OR B.PK IS NULL
hive steps
CREATE EXTERNAL TABLE customers_hive(key string, first_name string, last_name string, company_name string, address string, city string, county string, state string, zip string, phone1 string, phone2 string, email string, web string)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key, cf:first_name, cf:last_name, cf:company_name, cf:address, cf:city, cf:county, cf:state, cf:zip, cf:phone1, cf:phone2, cf:email, cf:web")
TBLPROPERTIES ("hbase.table.name"="CUSTOMERS");
SELECT yourcolumns
FROM tablenames
JOIN tablenames
WHERE condition
GROUP BY yourcolumns
HAVING aggregatecolumn condition
ORDER BY yourcolumns
'''
import pandas as pd
import happybase
import phoenixdb
from pyhive import hive
connection = happybase.Connection()
connection.open()
print('list of hbase tables {}'.format(connection.tables()))
customers = connection.table('CUSTOMERS')
keys = []
data_list = []
for key, data in customers.scan():
keys.append(key)
data_list.append(data)
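# column qualifiers come back as bytes like b'cf:first_name'; decode and strip the 'cf:' family prefix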
hbase_columns = [x.decode('utf-8')[3:] for x in data_list[0].keys()]
print('len of hbase keys {}'.format(len(keys)))
print('hbase columns {}'.format(hbase_columns))
print('hbase columns len {}'.format(len(hbase_columns)))
df = pd.read_csv('customers-with-header-500.csv', delimiter='|', index_col='index')
df_columns = list(df.columns)
print('csv file shape {}'.format(df.shape))
print('csv columns {}'.format(df_columns))
print('hbase columns == csv columns: {}'.format(set(hbase_columns) == set(df_columns)))
print('hbase row count == csv row count: {}'.format(len(keys) == df.shape[0]))
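# A minimal per-row check, sketched on the assumption that the HBase row key is
# the numeric CSV 'index' value and that every cell survives a string
# round-trip; the shape/column checks above do not compare cell values.
hbase_df = pd.DataFrame(
    [{k.decode('utf-8')[3:]: v.decode('utf-8') for k, v in row.items()} for row in data_list],
    index=[int(k.decode('utf-8')) for k in keys],
).sort_index()
csv_df = df.astype(str).sort_index()
hbase_df = hbase_df.reindex(index=csv_df.index, columns=csv_df.columns)  # missing rows/cells count as mismatches
row_mismatches = (hbase_df != csv_df).any(axis=1)
print('rows differing between hbase and csv: {}'.format(int(row_mismatches.sum())))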
url = 'http://localhost:8765/'
conn = phoenixdb.connect(url, autocommit=True)
cursor = conn.cursor()
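# recreate the Phoenix view over the existing HBase table so the columns below
# are queryable as SQL; note the DROP errors on a first run, before the view exists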
query1 = 'DROP VIEW "CUSTOMERS"'
cursor.execute(query1)
query2 = 'CREATE VIEW "CUSTOMERS" (pk VARCHAR PRIMARY KEY, "cf"."first_name" VARCHAR, "cf"."last_name" VARCHAR, "cf"."company_name" VARCHAR, "cf"."address" VARCHAR, "cf"."city" VARCHAR, "cf"."county" VARCHAR, "cf"."state" VARCHAR, "cf"."zip" VARCHAR, "cf"."phone1" VARCHAR, "cf"."phone2" VARCHAR, "cf"."email" VARCHAR, "cf"."web" VARCHAR)'
cursor.execute(query2)
query3 = 'SELECT * FROM CUSTOMERS'
cursor.execute(query3)
data = cursor.fetchall()
print(data[:2])
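# Key-reconciliation sketch mirroring the FULL JOIN noted in the docstring; it
# assumes CUSTOMERSPHOENIX was bulk-loaded with psql.py as described above.
# Every row returned is a key that exists on only one side.
query4 = (
    'SELECT A.PK, B.PK FROM CUSTOMERS AS A '
    'FULL JOIN CUSTOMERSPHOENIX AS B ON (A.PK = B.PK) '
    'WHERE A.PK IS NULL OR B.PK IS NULL'
)
cursor.execute(query4)
orphan_keys = cursor.fetchall()
print('keys present on only one side: {}'.format(len(orphan_keys)))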
cursor = hive.connect('localhost').cursor()
cursor.execute('SELECT * FROM customers_hive LIMIT 10')
result = cursor.fetchall()
print(len(result))
print(result)
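# Row-count cross-check through Hive (a sketch; assumes the external
# customers_hive table from the docstring is mapped onto CUSTOMERS).
cursor.execute('SELECT COUNT(*) FROM customers_hive')
hive_count = cursor.fetchall()[0][0]
print('hive row count == csv row count: {}'.format(hive_count == df.shape[0]))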
|
normal
|
{
"blob_id": "7b38c64174656d1c4ec2b0541e6ed8d6680af7d7",
"index": 9565,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconnection.open()\nprint('list of hbase tables {}'.format(connection.tables()))\n<mask token>\nfor key, data in customers.scan():\n keys.append(key)\n data_list.append(data)\n<mask token>\nprint('len of hbase keys {}'.format(len(keys)))\nprint('hbase columns {}'.format(hbase_columns))\nprint('hbase columns len {}'.format(len(hbase_columns)))\n<mask token>\nprint('csv file shape {}'.format(df.shape))\nprint('csv columns {}'.format(df_columns))\nprint('hbase columns == csv columns: {}'.format(set(hbase_columns) == set(\n df_columns)))\nprint('hbase row count == csv row count: {}'.format(len(keys) == df.shape[0]))\n<mask token>\ncursor.execute(query1)\n<mask token>\ncursor.execute(query2)\n<mask token>\ncursor.execute(query3)\n<mask token>\nprint(data[:2])\n<mask token>\ncursor.execute('SELECT * FROM customers_hive LIMIT 10')\n<mask token>\nprint(len(result))\nprint(result)\n",
"step-3": "<mask token>\nconnection = happybase.Connection()\nconnection.open()\nprint('list of hbase tables {}'.format(connection.tables()))\ncustomers = connection.table('CUSTOMERS')\nkeys = []\ndata_list = []\nfor key, data in customers.scan():\n keys.append(key)\n data_list.append(data)\nhbase_columns = [x.decode('utf-8')[3:] for x in data_list[0].keys()]\nprint('len of hbase keys {}'.format(len(keys)))\nprint('hbase columns {}'.format(hbase_columns))\nprint('hbase columns len {}'.format(len(hbase_columns)))\ndf = pd.read_csv('customers-with-header-500.csv', delimiter='|', index_col=\n 'index')\ndf_columns = list(df.columns)\nprint('csv file shape {}'.format(df.shape))\nprint('csv columns {}'.format(df_columns))\nprint('hbase columns == csv columns: {}'.format(set(hbase_columns) == set(\n df_columns)))\nprint('hbase row count == csv row count: {}'.format(len(keys) == df.shape[0]))\nurl = 'http://localhost:8765/'\nconn = phoenixdb.connect(url, autocommit=True)\ncursor = conn.cursor()\nquery1 = 'DROP VIEW \"CUSTOMERS\"'\ncursor.execute(query1)\nquery2 = (\n 'CREATE VIEW \"CUSTOMERS\" (pk VARCHAR PRIMARY KEY, \"cf\".\"first_name\" VARCHAR, \"cf\".\"last_name\" VARCHAR, \"cf\".\"company_name\" VARCHAR, \"cf\".\"address\" VARCHAR, \"cf\".\"city\" VARCHAR, \"cf\".\"county\" VARCHAR, \"cf\".\"state\" VARCHAR, \"cf\".\"zip\" VARCHAR, \"cf\".\"phone1\" VARCHAR, \"cf\".\"phone2\" VARCHAR, \"cf\".\"email\" VARCHAR, \"cf\".\"web\" VARCHAR)'\n )\ncursor.execute(query2)\nquery3 = 'SELECT * FROM CUSTOMERS'\ncursor.execute(query3)\ndata = cursor.fetchall()\nprint(data[:2])\n<mask token>\ncursor = hive.connect('localhost').cursor()\ncursor.execute('SELECT * FROM customers_hive LIMIT 10')\nresult = cursor.fetchall()\nprint(len(result))\nprint(result)\n",
"step-4": "<mask token>\nimport pandas as pd\nimport happybase\nimport phoenixdb\nfrom pyhive import hive\nconnection = happybase.Connection()\nconnection.open()\nprint('list of hbase tables {}'.format(connection.tables()))\ncustomers = connection.table('CUSTOMERS')\nkeys = []\ndata_list = []\nfor key, data in customers.scan():\n keys.append(key)\n data_list.append(data)\nhbase_columns = [x.decode('utf-8')[3:] for x in data_list[0].keys()]\nprint('len of hbase keys {}'.format(len(keys)))\nprint('hbase columns {}'.format(hbase_columns))\nprint('hbase columns len {}'.format(len(hbase_columns)))\ndf = pd.read_csv('customers-with-header-500.csv', delimiter='|', index_col=\n 'index')\ndf_columns = list(df.columns)\nprint('csv file shape {}'.format(df.shape))\nprint('csv columns {}'.format(df_columns))\nprint('hbase columns == csv columns: {}'.format(set(hbase_columns) == set(\n df_columns)))\nprint('hbase row count == csv row count: {}'.format(len(keys) == df.shape[0]))\nurl = 'http://localhost:8765/'\nconn = phoenixdb.connect(url, autocommit=True)\ncursor = conn.cursor()\nquery1 = 'DROP VIEW \"CUSTOMERS\"'\ncursor.execute(query1)\nquery2 = (\n 'CREATE VIEW \"CUSTOMERS\" (pk VARCHAR PRIMARY KEY, \"cf\".\"first_name\" VARCHAR, \"cf\".\"last_name\" VARCHAR, \"cf\".\"company_name\" VARCHAR, \"cf\".\"address\" VARCHAR, \"cf\".\"city\" VARCHAR, \"cf\".\"county\" VARCHAR, \"cf\".\"state\" VARCHAR, \"cf\".\"zip\" VARCHAR, \"cf\".\"phone1\" VARCHAR, \"cf\".\"phone2\" VARCHAR, \"cf\".\"email\" VARCHAR, \"cf\".\"web\" VARCHAR)'\n )\ncursor.execute(query2)\nquery3 = 'SELECT * FROM CUSTOMERS'\ncursor.execute(query3)\ndata = cursor.fetchall()\nprint(data[:2])\nfrom pyhive import hive\ncursor = hive.connect('localhost').cursor()\ncursor.execute('SELECT * FROM customers_hive LIMIT 10')\nresult = cursor.fetchall()\nprint(len(result))\nprint(result)\n",
"step-5": "'''\nwe have source files with a certain format and each file has 200 columns and there is a process that takes the source\nfiles and loads into hbase and moves it into sql data warehouse. We have to create automated test scripts that compares\nwith with is with hbase and sql data warehouse. load into hbase and query the flat file, query the hbase, and compare.\ncompare each row. load into hbase and query.\n\nhttps://community.hortonworks.com/articles/4942/import-csv-data-into-hbase-using-importtsv.html\nhttps://www.briandunning.com/sample-data/\nhttp://python-phoenixdb.readthedocs.io/en/latest/\nhttps://phoenix.apache.org/faq.html\nhttps://phoenix.apache.org/bulk_dataload.html\n\nhbase shell\ncreate 'CUSTOMERS', 'cf'\ncount 'CUSTOMERS'\nscan 'CUSTOMERS'\nexit\n\nhdfs dfs -put customers-with-out-header-500.csv\nhbase org.apache.hadoop.hbase.mapreduce.ImportTsv '-Dimporttsv.separator=|' -Dimporttsv.columns=\"HBASE_ROW_KEY,cf:first_name,cf:last_name,cf:company_name,cf:address,cf:city,cf:county,cf:state,cf:zip,cf:phone1,cf:phone2,cf:email,cf:web\" CUSTOMERS customers-with-out-header-500.csv\n\nsudo python3 -m pip install happybase\nsudo python3 -m pip install pandas\nsudo python3 -m pip install numpy\nsudo python3 -m pip install ipython\n\nlist of hbase tables [b'customers']\nlen of hbase keys 501\nhbase columns [b'cf:state', b'cf:phone2', b'cf:email', b'cf:zip', b'cf:last_name', b'cf:address', b'cf:city', b'cf:company_name', b'cf:phone1', b'cf:county', b'cf:first_name', b'cf:web']\nhbase columns len 12\ncsv file shape (500, 13)\ncsv columns ['index', 'first_name', 'last_name', 'company_name', 'address', 'city', 'county', 'state', 'zip', 'phone1', 'phone2', 'email', 'web']\n\nphoenix steps\npython /usr/lib/phoenix/bin/sqlline.py\nCREATE TABLE \"CUSTOMERSPHOENIX\" (pk VARCHAR PRIMARY KEY, first_name VARCHAR, last_name VARCHAR, company_name VARCHAR, address VARCHAR, city VARCHAR, county VARCHAR, state VARCHAR, zip VARCHAR, phone1 VARCHAR, phone2 VARCHAR, email VARCHAR, web VARCHAR)\npython /usr/lib/phoenix/bin/psql.py -t CUSTOMERSPHOENIX -d \"|\" localhost customers-with-out-header-500.csv\nSELECT A.*, B.* FROM CUSTOMERS AS A FULL JOIN CUSTOMERSPHOENIX AS B ON (A.PK = B.PK) WHERE A.PK IS NULL OR B.PK IS NULL\n\nhive steps\n\nCREATE EXTERNAL TABLE customers_hive(key string, first_name string, last_name string, company_name string, address string, city string, county string, state string, zip string, phone1 string, phone2 string, email string, web string)\nSTORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'\nWITH SERDEPROPERTIES (\"hbase.columns.mapping\" = \":key, cf:first_name, cf:last_name, cf:company_name, cf:address, cf:city, cf:county, cf:state, cf:zip, cf:phone1, cf:phone2, cf:email, cf:web\")\nTBLPROPERTIES (\"hbase.table.name\"=\"CUSTOMERS\");\n\nSELECT yourcolumns\nFROM tablenames\nJOIN tablenames\nWHERE condition\nGROUP BY yourcolumns\nHAVING aggregatecolumn condition\nORDER BY yourcolumns\n'''\n\nimport pandas as pd\nimport happybase\nimport phoenixdb\nfrom pyhive import hive\n\n\nconnection = happybase.Connection()\nconnection.open()\n\nprint('list of hbase tables {}'.format(connection.tables()))\n\ncustomers = connection.table('CUSTOMERS')\n\nkeys = []\ndata_list = []\n\nfor key, data in customers.scan():\n keys.append(key)\n data_list.append(data)\n\nhbase_columns = [x.decode('utf-8')[3:] for x in data_list[0].keys()]\n\nprint('len of hbase keys {}'.format(len(keys)))\nprint('hbase columns {}'.format(hbase_columns))\nprint('hbase columns len 
{}'.format(len(hbase_columns)))\n\ndf = pd.read_csv('customers-with-header-500.csv', delimiter='|', index_col='index')\n\ndf_columns = list(df.columns)\nprint('csv file shape {}'.format(df.shape))\nprint('csv columns {}'.format(df_columns))\n\nprint('hbase columns == csv columns: {}'.format(set(hbase_columns) == set(df_columns)))\nprint('hbase row count == csv row count: {}'.format(len(keys) == df.shape[0]))\n\n\nurl = 'http://localhost:8765/'\nconn = phoenixdb.connect(url, autocommit=True)\n\ncursor = conn.cursor()\nquery1 = 'DROP VIEW \"CUSTOMERS\"'\ncursor.execute(query1)\nquery2 = 'CREATE VIEW \"CUSTOMERS\" (pk VARCHAR PRIMARY KEY, \"cf\".\"first_name\" VARCHAR, \"cf\".\"last_name\" VARCHAR, \"cf\".\"company_name\" VARCHAR, \"cf\".\"address\" VARCHAR, \"cf\".\"city\" VARCHAR, \"cf\".\"county\" VARCHAR, \"cf\".\"state\" VARCHAR, \"cf\".\"zip\" VARCHAR, \"cf\".\"phone1\" VARCHAR, \"cf\".\"phone2\" VARCHAR, \"cf\".\"email\" VARCHAR, \"cf\".\"web\" VARCHAR)'\ncursor.execute(query2)\nquery3 = 'SELECT * FROM CUSTOMERS'\ncursor.execute(query3)\ndata = cursor.fetchall()\nprint(data[:2])\n\n\nfrom pyhive import hive # or import hive\ncursor = hive.connect('localhost').cursor()\ncursor.execute('SELECT * FROM customers_hive LIMIT 10')\nresult = cursor.fetchall()\nprint(len(result))\nprint(result)\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
<|reserved_special_token_0|>
_sym_db.RegisterMessage(NVLGPSStatus)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')
)
<|reserved_special_token_0|>
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(name='NVLGPSStatus.proto', package=
'', syntax='proto2', serialized_options=None, serialized_pb=_b(
'\n\x12NVLGPSStatus.proto"\x8d\x03\n\x0cNVLGPSStatus\x12\x12\n\ntracker_id\x18\x01 \x02(\x0c\x12\x12\n\ngps_active\x18\x02 \x02(\x08\x12\x10\n\x08date_day\x18\x03 \x01(\x05\x12\x12\n\ndate_month\x18\x04 \x01(\x05\x12\x11\n\tdate_year\x18\x05 \x01(\x05\x12\x12\n\ntime_hours\x18\x06 \x01(\x05\x12\x14\n\x0ctime_minutes\x18\x07 \x01(\x05\x12\x14\n\x0ctime_seconds\x18\x08 \x01(\x05\x12\x19\n\x11time_microseconds\x18\t \x01(\x05\x12\x10\n\x08latitude\x18\n \x01(\x01\x12\x11\n\tlongitude\x18\x0b \x01(\x01\x12\x1f\n\x17speed_over_ground_knots\x18\x0c \x01(\x02\x12\x1b\n\x13track_angle_degrees\x18\r \x01(\x02\x12\x1a\n\x12magnetic_variation\x18\x0e \x01(\x02\x12\x12\n\nfuel_level\x18\x0f \x01(\x05\x12\x15\n\rvoltage_level\x18\x10 \x01(\x02\x12\x17\n\x0fvehicle_running\x18\x11 \x01(\x08'
))
_NVLGPSSTATUS = _descriptor.Descriptor(name='NVLGPSStatus', full_name=
'NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None,
fields=[_descriptor.FieldDescriptor(name='tracker_id', full_name=
'NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9,
label=2, has_default_value=False, default_value=_b(''), message_type=
None, enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='gps_active', full_name=
'NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label
=2, has_default_value=False, default_value=False, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='date_day', full_name=
'NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0, message_type=None, enum_type=
None, containing_type=None, is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(
name='date_month', full_name='NVLGPSStatus.date_month', index=3, number
=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value
=0, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, serialized_options=None, file
=DESCRIPTOR), _descriptor.FieldDescriptor(name='date_year', full_name=
'NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=
1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='time_hours', full_name=
'NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label
=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='time_minutes', full_name=
'NVLGPSStatus.time_minutes', index=6, number=7, type=5, cpp_type=1,
label=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='time_seconds', full_name=
'NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1,
label=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='time_microseconds', full_name=
'NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1,
label=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='latitude', full_name=
'NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=
1, has_default_value=False, default_value=float(0), message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='longitude', full_name=
'NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5,
label=1, has_default_value=False, default_value=float(0), message_type=
None, enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='speed_over_ground_knots', full_name=
'NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2,
cpp_type=6, label=1, has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None, is_extension=
False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='track_angle_degrees', full_name=
'NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2,
cpp_type=6, label=1, has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None, is_extension=
False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='magnetic_variation', full_name=
'NVLGPSStatus.magnetic_variation', index=13, number=14, type=2,
cpp_type=6, label=1, has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None, is_extension=
False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='fuel_level', full_name=
'NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1,
label=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='voltage_level', full_name=
'NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6,
label=1, has_default_value=False, default_value=float(0), message_type=
None, enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='vehicle_running', full_name=
'NVLGPSStatus.vehicle_running', index=16, number=17, type=8, cpp_type=7,
label=1, has_default_value=False, default_value=False, message_type=
None, enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR)],
extensions=[], nested_types=[], enum_types=[], serialized_options=None,
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[],
serialized_start=23, serialized_end=420)
DESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (
_message.Message,), dict(DESCRIPTOR=_NVLGPSSTATUS, __module__=
'NVLGPSStatus_pb2'))
_sym_db.RegisterMessage(NVLGPSStatus)
<|reserved_special_token_1|>
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')
)
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(name='NVLGPSStatus.proto', package=
'', syntax='proto2', serialized_options=None, serialized_pb=_b(
'\n\x12NVLGPSStatus.proto"\x8d\x03\n\x0cNVLGPSStatus\x12\x12\n\ntracker_id\x18\x01 \x02(\x0c\x12\x12\n\ngps_active\x18\x02 \x02(\x08\x12\x10\n\x08date_day\x18\x03 \x01(\x05\x12\x12\n\ndate_month\x18\x04 \x01(\x05\x12\x11\n\tdate_year\x18\x05 \x01(\x05\x12\x12\n\ntime_hours\x18\x06 \x01(\x05\x12\x14\n\x0ctime_minutes\x18\x07 \x01(\x05\x12\x14\n\x0ctime_seconds\x18\x08 \x01(\x05\x12\x19\n\x11time_microseconds\x18\t \x01(\x05\x12\x10\n\x08latitude\x18\n \x01(\x01\x12\x11\n\tlongitude\x18\x0b \x01(\x01\x12\x1f\n\x17speed_over_ground_knots\x18\x0c \x01(\x02\x12\x1b\n\x13track_angle_degrees\x18\r \x01(\x02\x12\x1a\n\x12magnetic_variation\x18\x0e \x01(\x02\x12\x12\n\nfuel_level\x18\x0f \x01(\x05\x12\x15\n\rvoltage_level\x18\x10 \x01(\x02\x12\x17\n\x0fvehicle_running\x18\x11 \x01(\x08'
))
_NVLGPSSTATUS = _descriptor.Descriptor(name='NVLGPSStatus', full_name=
'NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None,
fields=[_descriptor.FieldDescriptor(name='tracker_id', full_name=
'NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9,
label=2, has_default_value=False, default_value=_b(''), message_type=
None, enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='gps_active', full_name=
'NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label
=2, has_default_value=False, default_value=False, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='date_day', full_name=
'NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0, message_type=None, enum_type=
None, containing_type=None, is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(
name='date_month', full_name='NVLGPSStatus.date_month', index=3, number
=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value
=0, message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None, serialized_options=None, file
=DESCRIPTOR), _descriptor.FieldDescriptor(name='date_year', full_name=
'NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=
1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='time_hours', full_name=
'NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label
=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='time_minutes', full_name=
'NVLGPSStatus.time_minutes', index=6, number=7, type=5, cpp_type=1,
label=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='time_seconds', full_name=
'NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1,
label=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='time_microseconds', full_name=
'NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1,
label=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='latitude', full_name=
'NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=
1, has_default_value=False, default_value=float(0), message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='longitude', full_name=
'NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5,
label=1, has_default_value=False, default_value=float(0), message_type=
None, enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='speed_over_ground_knots', full_name=
'NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2,
cpp_type=6, label=1, has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None, is_extension=
False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='track_angle_degrees', full_name=
'NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2,
cpp_type=6, label=1, has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None, is_extension=
False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='magnetic_variation', full_name=
'NVLGPSStatus.magnetic_variation', index=13, number=14, type=2,
cpp_type=6, label=1, has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None, is_extension=
False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='fuel_level', full_name=
'NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1,
label=1, has_default_value=False, default_value=0, message_type=None,
enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='voltage_level', full_name=
'NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6,
label=1, has_default_value=False, default_value=float(0), message_type=
None, enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='vehicle_running', full_name=
'NVLGPSStatus.vehicle_running', index=16, number=17, type=8, cpp_type=7,
label=1, has_default_value=False, default_value=False, message_type=
None, enum_type=None, containing_type=None, is_extension=False,
extension_scope=None, serialized_options=None, file=DESCRIPTOR)],
extensions=[], nested_types=[], enum_types=[], serialized_options=None,
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[],
serialized_start=23, serialized_end=420)
DESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (
_message.Message,), dict(DESCRIPTOR=_NVLGPSSTATUS, __module__=
'NVLGPSStatus_pb2'))
_sym_db.RegisterMessage(NVLGPSStatus)
<|reserved_special_token_1|>
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: NVLGPSStatus.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='NVLGPSStatus.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x12NVLGPSStatus.proto\"\x8d\x03\n\x0cNVLGPSStatus\x12\x12\n\ntracker_id\x18\x01 \x02(\x0c\x12\x12\n\ngps_active\x18\x02 \x02(\x08\x12\x10\n\x08\x64\x61te_day\x18\x03 \x01(\x05\x12\x12\n\ndate_month\x18\x04 \x01(\x05\x12\x11\n\tdate_year\x18\x05 \x01(\x05\x12\x12\n\ntime_hours\x18\x06 \x01(\x05\x12\x14\n\x0ctime_minutes\x18\x07 \x01(\x05\x12\x14\n\x0ctime_seconds\x18\x08 \x01(\x05\x12\x19\n\x11time_microseconds\x18\t \x01(\x05\x12\x10\n\x08latitude\x18\n \x01(\x01\x12\x11\n\tlongitude\x18\x0b \x01(\x01\x12\x1f\n\x17speed_over_ground_knots\x18\x0c \x01(\x02\x12\x1b\n\x13track_angle_degrees\x18\r \x01(\x02\x12\x1a\n\x12magnetic_variation\x18\x0e \x01(\x02\x12\x12\n\nfuel_level\x18\x0f \x01(\x05\x12\x15\n\rvoltage_level\x18\x10 \x01(\x02\x12\x17\n\x0fvehicle_running\x18\x11 \x01(\x08')
)
_NVLGPSSTATUS = _descriptor.Descriptor(
name='NVLGPSStatus',
full_name='NVLGPSStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tracker_id', full_name='NVLGPSStatus.tracker_id', index=0,
number=1, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gps_active', full_name='NVLGPSStatus.gps_active', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_day', full_name='NVLGPSStatus.date_day', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_month', full_name='NVLGPSStatus.date_month', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_year', full_name='NVLGPSStatus.date_year', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_hours', full_name='NVLGPSStatus.time_hours', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_minutes', full_name='NVLGPSStatus.time_minutes', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_seconds', full_name='NVLGPSStatus.time_seconds', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_microseconds', full_name='NVLGPSStatus.time_microseconds', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='latitude', full_name='NVLGPSStatus.latitude', index=9,
number=10, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='longitude', full_name='NVLGPSStatus.longitude', index=10,
number=11, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='speed_over_ground_knots', full_name='NVLGPSStatus.speed_over_ground_knots', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='track_angle_degrees', full_name='NVLGPSStatus.track_angle_degrees', index=12,
number=13, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='magnetic_variation', full_name='NVLGPSStatus.magnetic_variation', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fuel_level', full_name='NVLGPSStatus.fuel_level', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='voltage_level', full_name='NVLGPSStatus.voltage_level', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vehicle_running', full_name='NVLGPSStatus.vehicle_running', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=23,
serialized_end=420,
)
DESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (_message.Message,), dict(
DESCRIPTOR = _NVLGPSSTATUS,
__module__ = 'NVLGPSStatus_pb2'
# @@protoc_insertion_point(class_scope:NVLGPSStatus)
))
_sym_db.RegisterMessage(NVLGPSStatus)
# @@protoc_insertion_point(module_scope)
|
flexible
|
{
"blob_id": "98d2196439a8dc3d511d176e61897aa67663a0b5",
"index": 4922,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n<mask token>\n_sym_db.RegisterMessage(NVLGPSStatus)\n",
"step-3": "<mask token>\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\n<mask token>\n_sym_db = _symbol_database.Default()\nDESCRIPTOR = _descriptor.FileDescriptor(name='NVLGPSStatus.proto', package=\n '', syntax='proto2', serialized_options=None, serialized_pb=_b(\n '\\n\\x12NVLGPSStatus.proto\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08date_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08'\n ))\n_NVLGPSSTATUS = _descriptor.Descriptor(name='NVLGPSStatus', full_name=\n 'NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None,\n fields=[_descriptor.FieldDescriptor(name='tracker_id', full_name=\n 'NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9,\n label=2, has_default_value=False, default_value=_b(''), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='gps_active', full_name=\n 'NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label\n =2, has_default_value=False, default_value=False, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='date_day', full_name=\n 'NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0, message_type=None, enum_type=\n None, containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3, number\n =4, type=5, cpp_type=1, label=1, has_default_value=False, default_value\n =0, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR), _descriptor.FieldDescriptor(name='date_year', full_name=\n 'NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=\n 1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_hours', full_name=\n 'NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label\n =1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_minutes', full_name=\n 'NVLGPSStatus.time_minutes', index=6, number=7, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, 
message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_seconds', full_name=\n 'NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_microseconds', full_name=\n 'NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='latitude', full_name=\n 'NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=\n 1, has_default_value=False, default_value=float(0), message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='longitude', full_name=\n 'NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='speed_over_ground_knots', full_name=\n 'NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='track_angle_degrees', full_name=\n 'NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='magnetic_variation', full_name=\n 'NVLGPSStatus.magnetic_variation', index=13, number=14, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='fuel_level', full_name=\n 'NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='voltage_level', full_name=\n 'NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='vehicle_running', full_name=\n 'NVLGPSStatus.vehicle_running', index=16, number=17, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, 
file=DESCRIPTOR)],\n extensions=[], nested_types=[], enum_types=[], serialized_options=None,\n is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[],\n serialized_start=23, serialized_end=420)\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (\n _message.Message,), dict(DESCRIPTOR=_NVLGPSSTATUS, __module__=\n 'NVLGPSStatus_pb2'))\n_sym_db.RegisterMessage(NVLGPSStatus)\n",
"step-4": "import sys\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n_sym_db = _symbol_database.Default()\nDESCRIPTOR = _descriptor.FileDescriptor(name='NVLGPSStatus.proto', package=\n '', syntax='proto2', serialized_options=None, serialized_pb=_b(\n '\\n\\x12NVLGPSStatus.proto\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08date_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08'\n ))\n_NVLGPSSTATUS = _descriptor.Descriptor(name='NVLGPSStatus', full_name=\n 'NVLGPSStatus', filename=None, file=DESCRIPTOR, containing_type=None,\n fields=[_descriptor.FieldDescriptor(name='tracker_id', full_name=\n 'NVLGPSStatus.tracker_id', index=0, number=1, type=12, cpp_type=9,\n label=2, has_default_value=False, default_value=_b(''), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='gps_active', full_name=\n 'NVLGPSStatus.gps_active', index=1, number=2, type=8, cpp_type=7, label\n =2, has_default_value=False, default_value=False, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='date_day', full_name=\n 'NVLGPSStatus.date_day', index=2, number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0, message_type=None, enum_type=\n None, containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3, number\n =4, type=5, cpp_type=1, label=1, has_default_value=False, default_value\n =0, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR), _descriptor.FieldDescriptor(name='date_year', full_name=\n 'NVLGPSStatus.date_year', index=4, number=5, type=5, cpp_type=1, label=\n 1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_hours', full_name=\n 'NVLGPSStatus.time_hours', index=5, number=6, type=5, cpp_type=1, label\n =1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, 
serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_minutes', full_name=\n 'NVLGPSStatus.time_minutes', index=6, number=7, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_seconds', full_name=\n 'NVLGPSStatus.time_seconds', index=7, number=8, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='time_microseconds', full_name=\n 'NVLGPSStatus.time_microseconds', index=8, number=9, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='latitude', full_name=\n 'NVLGPSStatus.latitude', index=9, number=10, type=1, cpp_type=5, label=\n 1, has_default_value=False, default_value=float(0), message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='longitude', full_name=\n 'NVLGPSStatus.longitude', index=10, number=11, type=1, cpp_type=5,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='speed_over_ground_knots', full_name=\n 'NVLGPSStatus.speed_over_ground_knots', index=11, number=12, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='track_angle_degrees', full_name=\n 'NVLGPSStatus.track_angle_degrees', index=12, number=13, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='magnetic_variation', full_name=\n 'NVLGPSStatus.magnetic_variation', index=13, number=14, type=2,\n cpp_type=6, label=1, has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='fuel_level', full_name=\n 'NVLGPSStatus.fuel_level', index=14, number=15, type=5, cpp_type=1,\n label=1, has_default_value=False, default_value=0, message_type=None,\n enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='voltage_level', full_name=\n 'NVLGPSStatus.voltage_level', index=15, number=16, type=2, cpp_type=6,\n label=1, has_default_value=False, default_value=float(0), message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(name='vehicle_running', full_name=\n 'NVLGPSStatus.vehicle_running', 
index=16, number=17, type=8, cpp_type=7,\n label=1, has_default_value=False, default_value=False, message_type=\n None, enum_type=None, containing_type=None, is_extension=False,\n extension_scope=None, serialized_options=None, file=DESCRIPTOR)],\n extensions=[], nested_types=[], enum_types=[], serialized_options=None,\n is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[],\n serialized_start=23, serialized_end=420)\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (\n _message.Message,), dict(DESCRIPTOR=_NVLGPSSTATUS, __module__=\n 'NVLGPSStatus_pb2'))\n_sym_db.RegisterMessage(NVLGPSStatus)\n",
"step-5": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: NVLGPSStatus.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='NVLGPSStatus.proto',\n package='',\n syntax='proto2',\n serialized_options=None,\n serialized_pb=_b('\\n\\x12NVLGPSStatus.proto\\\"\\x8d\\x03\\n\\x0cNVLGPSStatus\\x12\\x12\\n\\ntracker_id\\x18\\x01 \\x02(\\x0c\\x12\\x12\\n\\ngps_active\\x18\\x02 \\x02(\\x08\\x12\\x10\\n\\x08\\x64\\x61te_day\\x18\\x03 \\x01(\\x05\\x12\\x12\\n\\ndate_month\\x18\\x04 \\x01(\\x05\\x12\\x11\\n\\tdate_year\\x18\\x05 \\x01(\\x05\\x12\\x12\\n\\ntime_hours\\x18\\x06 \\x01(\\x05\\x12\\x14\\n\\x0ctime_minutes\\x18\\x07 \\x01(\\x05\\x12\\x14\\n\\x0ctime_seconds\\x18\\x08 \\x01(\\x05\\x12\\x19\\n\\x11time_microseconds\\x18\\t \\x01(\\x05\\x12\\x10\\n\\x08latitude\\x18\\n \\x01(\\x01\\x12\\x11\\n\\tlongitude\\x18\\x0b \\x01(\\x01\\x12\\x1f\\n\\x17speed_over_ground_knots\\x18\\x0c \\x01(\\x02\\x12\\x1b\\n\\x13track_angle_degrees\\x18\\r \\x01(\\x02\\x12\\x1a\\n\\x12magnetic_variation\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nfuel_level\\x18\\x0f \\x01(\\x05\\x12\\x15\\n\\rvoltage_level\\x18\\x10 \\x01(\\x02\\x12\\x17\\n\\x0fvehicle_running\\x18\\x11 \\x01(\\x08')\n)\n\n\n\n\n_NVLGPSSTATUS = _descriptor.Descriptor(\n name='NVLGPSStatus',\n full_name='NVLGPSStatus',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='tracker_id', full_name='NVLGPSStatus.tracker_id', index=0,\n number=1, type=12, cpp_type=9, label=2,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='gps_active', full_name='NVLGPSStatus.gps_active', index=1,\n number=2, type=8, cpp_type=7, label=2,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_day', full_name='NVLGPSStatus.date_day', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_month', full_name='NVLGPSStatus.date_month', index=3,\n number=4, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='date_year', full_name='NVLGPSStatus.date_year', index=4,\n number=5, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_hours', full_name='NVLGPSStatus.time_hours', index=5,\n number=6, type=5, 
cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_minutes', full_name='NVLGPSStatus.time_minutes', index=6,\n number=7, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_seconds', full_name='NVLGPSStatus.time_seconds', index=7,\n number=8, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='time_microseconds', full_name='NVLGPSStatus.time_microseconds', index=8,\n number=9, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='latitude', full_name='NVLGPSStatus.latitude', index=9,\n number=10, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='longitude', full_name='NVLGPSStatus.longitude', index=10,\n number=11, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='speed_over_ground_knots', full_name='NVLGPSStatus.speed_over_ground_knots', index=11,\n number=12, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='track_angle_degrees', full_name='NVLGPSStatus.track_angle_degrees', index=12,\n number=13, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='magnetic_variation', full_name='NVLGPSStatus.magnetic_variation', index=13,\n number=14, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='fuel_level', full_name='NVLGPSStatus.fuel_level', index=14,\n number=15, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='voltage_level', full_name='NVLGPSStatus.voltage_level', index=15,\n number=16, type=2, cpp_type=6, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, 
containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='vehicle_running', full_name='NVLGPSStatus.vehicle_running', index=16,\n number=17, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=23,\n serialized_end=420,\n)\n\nDESCRIPTOR.message_types_by_name['NVLGPSStatus'] = _NVLGPSSTATUS\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nNVLGPSStatus = _reflection.GeneratedProtocolMessageType('NVLGPSStatus', (_message.Message,), dict(\n DESCRIPTOR = _NVLGPSSTATUS,\n __module__ = 'NVLGPSStatus_pb2'\n # @@protoc_insertion_point(class_scope:NVLGPSStatus)\n ))\n_sym_db.RegisterMessage(NVLGPSStatus)\n\n\n# @@protoc_insertion_point(module_scope)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Given an integer, convert it to a roman numeral.
# Input is guaranteed to be within the range from 1 to 3999.
class Solution:
# @param {integer} num
# @return {string}
def intToRoman(self, num):
normalDic = {
1000: 'M',
500: 'D',
100: 'C',
50: 'L',
10: 'X',
5: 'V',
1: 'I'
}
specialDic = {
'41': 'IV', # 4
'91': 'IX', # 9
'42': 'XL', # 40
'92': 'XC', # 90
'43': 'CD', # 400
'93': 'CM', # 900
}
roman = ""
remainders = ['4', '9']
divisors = [1000, 500, 100, 50, 10, 5, 1]
for i, divisor in enumerate(divisors):
            quotient = num // divisor  # floor division keeps quotient an int on Python 3 as well
if quotient > 0:
roman += normalDic[divisor] * quotient
num = num % divisor
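            # subtractive forms: a leading digit of 4 or 9 selects a
            # (digit, magnitude) key, e.g. num=940 -> key '93' -> append 'CM', subtract 900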
if str(num)[0] in remainders:
roman += specialDic[str(num)[0] + str(len(str(num)))]
num -= int(str(num)[0]) * (10 ** (len(str(num)) - 1))
return roman
|
normal
|
{
"blob_id": "7de06772a1024a81193ac69a1110ad2e8b7f64ac",
"index": 9085,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def intToRoman(self, num):\n normalDic = {(1000): 'M', (500): 'D', (100): 'C', (50): 'L', (10):\n 'X', (5): 'V', (1): 'I'}\n specialDic = {'41': 'IV', '91': 'IX', '42': 'XL', '92': 'XC', '43':\n 'CD', '93': 'CM'}\n roman = ''\n remainders = ['4', '9']\n divisors = [1000, 500, 100, 50, 10, 5, 1]\n for i, divisor in enumerate(divisors):\n quotient = num / divisor\n if quotient > 0:\n roman += normalDic[divisor] * quotient\n num = num % divisor\n if str(num)[0] in remainders:\n roman += specialDic[str(num)[0] + str(len(str(num)))]\n num -= int(str(num)[0]) * 10 ** (len(str(num)) - 1)\n return roman\n",
"step-4": "# Given an integer, convert it to a roman numeral.\n\n# Input is guaranteed to be within the range from 1 to 3999.\n\nclass Solution:\n # @param {integer} num\n # @return {string}\n def intToRoman(self, num):\n normalDic = {\n 1000: 'M',\n 500: 'D',\n 100: 'C',\n 50: 'L',\n 10: 'X',\n 5: 'V',\n 1: 'I'\n }\n specialDic = {\n '41': 'IV', # 4\n '91': 'IX', # 9\n '42': 'XL', # 40\n '92': 'XC', # 90\n '43': 'CD', # 400\n '93': 'CM', # 900\n }\n\n roman = \"\"\n remainders = ['4', '9']\n divisors = [1000, 500, 100, 50, 10, 5, 1]\n for i, divisor in enumerate(divisors):\n quotient = num/divisor\n if quotient > 0:\n roman += normalDic[divisor] * quotient\n num = num % divisor\n\n if str(num)[0] in remainders:\n roman += specialDic[str(num)[0] + str(len(str(num)))]\n num -= int(str(num)[0]) * (10 ** (len(str(num)) - 1))\n\n return roman",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from django.db import models
# Create your models here.
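# Python 2-era Django models: __unicode__ methods and ForeignKey without on_delete (pre-Django 2.0)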
class Test(models.Model):
name = models.CharField(max_length=20)
def __unicode__(self):
return self.name
class Contact(models.Model):
GENDER_TYPES = (
('M', u'男'),
('F', u'女'),
('X', u'不告诉你'),
)
name = models.CharField(u'姓名', max_length=20)
age = models.IntegerField(u'年龄', default=0)
gender = models.CharField(u'性别', max_length=1, null=False, blank=False, choices=GENDER_TYPES, default='X')
email = models.EmailField()
tele = models.CharField(u'电话', max_length=20)
address = models.CharField(u'地址', max_length=200)
postcode = models.CharField(u'邮政编码', max_length=6)
notes = models.CharField(u'备注', max_length=200)
def __unicode__(self):
return self.name
class Tag(models.Model):
contact = models.ForeignKey(Contact)
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
|
normal
|
{
"blob_id": "514a3fc312d36e6f9b601ede7f7a3940c138d39a",
"index": 2000,
"step-1": "<mask token>\n\n\nclass Contact(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Test(models.Model):\n <mask token>\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = ('M', u'男'), ('F', u'女'), ('X', u'不告诉你')\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False,\n choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\n\n# Create your models here.\n\n\nclass Test(models.Model):\n name = models.CharField(max_length=20)\n\n def __unicode__(self):\n return self.name\n\n\nclass Contact(models.Model):\n GENDER_TYPES = (\n ('M', u'男'),\n ('F', u'女'),\n ('X', u'不告诉你'),\n )\n name = models.CharField(u'姓名', max_length=20)\n age = models.IntegerField(u'年龄', default=0)\n gender = models.CharField(u'性别', max_length=1, null=False, blank=False, choices=GENDER_TYPES, default='X')\n email = models.EmailField()\n tele = models.CharField(u'电话', max_length=20)\n address = models.CharField(u'地址', max_length=200)\n postcode = models.CharField(u'邮政编码', max_length=6)\n notes = models.CharField(u'备注', max_length=200)\n\n def __unicode__(self):\n return self.name\n\n\nclass Tag(models.Model):\n contact = models.ForeignKey(Contact)\n name = models.CharField(max_length=50)\n\n def __unicode__(self):\n return self.name\n",
"step-ids": [
5,
8,
9,
10,
11
]
}
|
[
5,
8,
9,
10,
11
] |
import math
import operator as op
Symbol = str
Number = (int, float)
Atom = (Symbol, Number)
List = list
Exp = (Atom, List)
Env = dict
def standard_env() -> Env:
"An environment with some scheme standard procedures"
env = Env()
env.update(vars(math)) # sin, cos, sqrt, pi ...
env.update({
'+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv,
        '>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'abs':abs,
'append':op.add,
'apply':lambda proc, args: proc(*args),
'begin':lambda *x: x[-1],
'car':lambda x: x[0],
'cdr':lambda x: x[1:],
'cons':lambda x,y: [x] + y,
'eq?':op.is_,
'expt':pow,
'equal?':op.eq,
'length':len,
'list':lambda *x: List(x),
'list?':lambda x: isinstance(x, List),
'map':map,
'max':max,
'min':min,
'not':op.not_,
'null?':lambda x: x == [],
'number?':lambda x: isinstance(x, Number),
'print':print,
'procedure?':callable,
'round':round,
'symbol?':lambda x: isinstance(x, Symbol),
})
return env
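# the single top-level scope; 'define' mutates it in place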
global_env = standard_env()
def eval(x: Exp, env=global_env) -> Exp:
"Evaluate an expression in an environment."
if isinstance(x, Symbol): # variable reference
return env[x]
elif not isinstance(x, List): # constant number
return x
elif x[0] == 'if': # conditional
(_, test, conseq, alt) = x
exp = (conseq if eval(test, env) else alt)
return eval(exp, env)
elif x[0] == 'define': # definition
(_, symbol, exp) = x
env[symbol] = eval(exp, env)
else: # procedure call
proc = eval(x[0], env)
args = [eval(arg, env) for arg in x[1:]]
return proc(*args)
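# Example (sketch, using the parser defined below): a define mutates global_env,
# so a later lookup sees the binding:
#   eval(parse("(define x 3)"))   # binds x -> 3 in global_env
#   eval(parse("(* x x)"))        # -> 9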
def tokenize(chars: str) -> list:
"convert a string of characters into a list of tokens"
return chars.replace('(', ' ( ').replace(')', ' ) ').split()
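# e.g. tokenize("(+ 1 (* 2 3))")
#   -> ['(', '+', '1', '(', '*', '2', '3', ')', ')']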
def parse(program: str) -> Exp:
"Read a scheme expression from a string"
return read_from_tokens(tokenize(program))
def read_from_tokens(tokens: list) -> Exp:
"Read an expression from a sequence of tokens"
if len(tokens) == 0:
raise SyntaxError('unexpected EOF')
token = tokens.pop(0)
if token == '(':
L = []
while tokens[0] != ')':
L.append(read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
elif token == ')':
raise SyntaxError('unexpected )')
else:
return atom(token)
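# e.g. read_from_tokens(tokenize("(+ 1 (* 2 3))")) -> ['+', 1, ['*', 2, 3]]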
def atom(token: str) -> Atom:
"Numbers become numbers; every other token is a symbol"
try: return int(token)
except ValueError:
try: return float(token)
except ValueError:
return Symbol(token)
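# e.g. atom('12') -> 12, atom('1.5') -> 1.5, atom('pi') -> 'pi' (a Symbol)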
program = "(begin (define r 10) (* pi (* r r)))"
print(eval(parse(program)))
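
# Two more evaluations (illustrative sketch; expected values in comments):
print(eval(parse("(if (> 10 20) 1 2)")))       # -> 2
print(eval(parse("(list 1 2 (+ 1 2))")))       # -> [1, 2, 3]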
|
normal
|
{
"blob_id": "88862d6bee5d83dd5f1c656a06a9dc46a5254b10",
"index": 3608,
"step-1": "<mask token>\n\n\ndef standard_env() ->Env:\n \"\"\"An environment with some scheme standard procedures\"\"\"\n env = Env()\n env.update(vars(math))\n env.update({'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>':\n op.gt, '>': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs,\n 'append': op.add, 'apply': lambda proc, args: proc(*args), 'begin':\n lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'expt': pow, 'equal?':\n op.eq, 'length': len, 'list': lambda *x: List(x), 'list?': lambda x:\n isinstance(x, List), 'map': map, 'max': max, 'min': min, 'not': op.\n not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x,\n Number), 'print': print, 'procedure?': callable, 'round': round,\n 'symbol?': lambda x: isinstance(x, Symbol)})\n return env\n\n\n<mask token>\n\n\ndef eval(x: Exp, env=global_env) ->Exp:\n \"\"\"Evaluate an expression in an environment.\"\"\"\n if isinstance(x, Symbol):\n return env[x]\n elif not isinstance(x, List):\n return x\n elif x[0] == 'if':\n _, test, conseq, alt = x\n exp = conseq if eval(test, env) else alt\n return eval(exp, env)\n elif x[0] == 'define':\n _, symbol, exp = x\n env[symbol] = eval(exp, env)\n else:\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\n\ndef tokenize(chars: str) ->list:\n \"\"\"convert a string of characters into a list of tokens\"\"\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\n\ndef parse(program: str) ->Exp:\n \"\"\"Read a scheme expression from a string\"\"\"\n return read_from_tokens(tokenize(program))\n\n\n<mask token>\n\n\ndef atom(token: str) ->Atom:\n \"\"\"Numbers become numbers; every other token is a symbol\"\"\"\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef standard_env() ->Env:\n \"\"\"An environment with some scheme standard procedures\"\"\"\n env = Env()\n env.update(vars(math))\n env.update({'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>':\n op.gt, '>': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs,\n 'append': op.add, 'apply': lambda proc, args: proc(*args), 'begin':\n lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'expt': pow, 'equal?':\n op.eq, 'length': len, 'list': lambda *x: List(x), 'list?': lambda x:\n isinstance(x, List), 'map': map, 'max': max, 'min': min, 'not': op.\n not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x,\n Number), 'print': print, 'procedure?': callable, 'round': round,\n 'symbol?': lambda x: isinstance(x, Symbol)})\n return env\n\n\n<mask token>\n\n\ndef eval(x: Exp, env=global_env) ->Exp:\n \"\"\"Evaluate an expression in an environment.\"\"\"\n if isinstance(x, Symbol):\n return env[x]\n elif not isinstance(x, List):\n return x\n elif x[0] == 'if':\n _, test, conseq, alt = x\n exp = conseq if eval(test, env) else alt\n return eval(exp, env)\n elif x[0] == 'define':\n _, symbol, exp = x\n env[symbol] = eval(exp, env)\n else:\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\n\ndef tokenize(chars: str) ->list:\n \"\"\"convert a string of characters into a list of tokens\"\"\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\n\ndef parse(program: str) ->Exp:\n \"\"\"Read a scheme expression from a string\"\"\"\n return read_from_tokens(tokenize(program))\n\n\ndef read_from_tokens(tokens: list) ->Exp:\n \"\"\"Read an expression from a sequence of tokens\"\"\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0)\n if token == '(':\n L = []\n while tokens[0] != ')':\n L.append(read_from_tokens(tokens))\n tokens.pop(0)\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)\n\n\ndef atom(token: str) ->Atom:\n \"\"\"Numbers become numbers; every other token is a symbol\"\"\"\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)\n\n\n<mask token>\nprint(eval(parse(program)))\n",
"step-3": "<mask token>\nSymbol = str\nNumber = int, float\nAtom = Symbol, Number\nList = list\nExp = Atom, List\nEnv = dict\n\n\ndef standard_env() ->Env:\n \"\"\"An environment with some scheme standard procedures\"\"\"\n env = Env()\n env.update(vars(math))\n env.update({'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>':\n op.gt, '>': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs,\n 'append': op.add, 'apply': lambda proc, args: proc(*args), 'begin':\n lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'expt': pow, 'equal?':\n op.eq, 'length': len, 'list': lambda *x: List(x), 'list?': lambda x:\n isinstance(x, List), 'map': map, 'max': max, 'min': min, 'not': op.\n not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x,\n Number), 'print': print, 'procedure?': callable, 'round': round,\n 'symbol?': lambda x: isinstance(x, Symbol)})\n return env\n\n\nglobal_env = standard_env()\n\n\ndef eval(x: Exp, env=global_env) ->Exp:\n \"\"\"Evaluate an expression in an environment.\"\"\"\n if isinstance(x, Symbol):\n return env[x]\n elif not isinstance(x, List):\n return x\n elif x[0] == 'if':\n _, test, conseq, alt = x\n exp = conseq if eval(test, env) else alt\n return eval(exp, env)\n elif x[0] == 'define':\n _, symbol, exp = x\n env[symbol] = eval(exp, env)\n else:\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\n\ndef tokenize(chars: str) ->list:\n \"\"\"convert a string of characters into a list of tokens\"\"\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\n\ndef parse(program: str) ->Exp:\n \"\"\"Read a scheme expression from a string\"\"\"\n return read_from_tokens(tokenize(program))\n\n\ndef read_from_tokens(tokens: list) ->Exp:\n \"\"\"Read an expression from a sequence of tokens\"\"\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0)\n if token == '(':\n L = []\n while tokens[0] != ')':\n L.append(read_from_tokens(tokens))\n tokens.pop(0)\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)\n\n\ndef atom(token: str) ->Atom:\n \"\"\"Numbers become numbers; every other token is a symbol\"\"\"\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)\n\n\nprogram = '(begin (define r 10) (* pi (* r r)))'\nprint(eval(parse(program)))\n",
"step-4": "import math\nimport operator as op\nSymbol = str\nNumber = int, float\nAtom = Symbol, Number\nList = list\nExp = Atom, List\nEnv = dict\n\n\ndef standard_env() ->Env:\n \"\"\"An environment with some scheme standard procedures\"\"\"\n env = Env()\n env.update(vars(math))\n env.update({'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>':\n op.gt, '>': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs,\n 'append': op.add, 'apply': lambda proc, args: proc(*args), 'begin':\n lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'expt': pow, 'equal?':\n op.eq, 'length': len, 'list': lambda *x: List(x), 'list?': lambda x:\n isinstance(x, List), 'map': map, 'max': max, 'min': min, 'not': op.\n not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x,\n Number), 'print': print, 'procedure?': callable, 'round': round,\n 'symbol?': lambda x: isinstance(x, Symbol)})\n return env\n\n\nglobal_env = standard_env()\n\n\ndef eval(x: Exp, env=global_env) ->Exp:\n \"\"\"Evaluate an expression in an environment.\"\"\"\n if isinstance(x, Symbol):\n return env[x]\n elif not isinstance(x, List):\n return x\n elif x[0] == 'if':\n _, test, conseq, alt = x\n exp = conseq if eval(test, env) else alt\n return eval(exp, env)\n elif x[0] == 'define':\n _, symbol, exp = x\n env[symbol] = eval(exp, env)\n else:\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\n\ndef tokenize(chars: str) ->list:\n \"\"\"convert a string of characters into a list of tokens\"\"\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\n\ndef parse(program: str) ->Exp:\n \"\"\"Read a scheme expression from a string\"\"\"\n return read_from_tokens(tokenize(program))\n\n\ndef read_from_tokens(tokens: list) ->Exp:\n \"\"\"Read an expression from a sequence of tokens\"\"\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0)\n if token == '(':\n L = []\n while tokens[0] != ')':\n L.append(read_from_tokens(tokens))\n tokens.pop(0)\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)\n\n\ndef atom(token: str) ->Atom:\n \"\"\"Numbers become numbers; every other token is a symbol\"\"\"\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)\n\n\nprogram = '(begin (define r 10) (* pi (* r r)))'\nprint(eval(parse(program)))\n",
"step-5": "import math\nimport operator as op\n\nSymbol = str\nNumber = (int, float)\nAtom = (Symbol, Number)\nList = list\nExp = (Atom, List)\nEnv = dict\n\ndef standard_env() -> Env:\n \"An environment with some scheme standard procedures\"\n env = Env()\n env.update(vars(math)) # sin, cos, sqrt, pi ...\n env.update({\n '+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv,\n '>':op.gt, '>':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,\n 'abs':abs,\n 'append':op.add,\n 'apply':lambda proc, args: proc(*args),\n 'begin':lambda *x: x[-1],\n 'car':lambda x: x[0],\n 'cdr':lambda x: x[1:],\n 'cons':lambda x,y: [x] + y,\n 'eq?':op.is_,\n 'expt':pow,\n 'equal?':op.eq,\n 'length':len,\n 'list':lambda *x: List(x),\n 'list?':lambda x: isinstance(x, List),\n 'map':map,\n 'max':max,\n 'min':min,\n 'not':op.not_,\n 'null?':lambda x: x == [],\n 'number?':lambda x: isinstance(x, Number),\n 'print':print,\n 'procedure?':callable,\n 'round':round,\n 'symbol?':lambda x: isinstance(x, Symbol),\n })\n return env\n\nglobal_env = standard_env()\n\ndef eval(x: Exp, env=global_env) -> Exp:\n \"Evaluate an expression in an environment.\"\n if isinstance(x, Symbol): # variable reference\n return env[x]\n elif not isinstance(x, List): # constant number\n return x\n elif x[0] == 'if': # conditional\n (_, test, conseq, alt) = x\n exp = (conseq if eval(test, env) else alt)\n return eval(exp, env)\n elif x[0] == 'define': # definition\n (_, symbol, exp) = x\n env[symbol] = eval(exp, env)\n else: # procedure call\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\ndef tokenize(chars: str) -> list:\n \"convert a string of characters into a list of tokens\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\ndef parse(program: str) -> Exp:\n \"Read a scheme expression from a string\"\n return read_from_tokens(tokenize(program))\n\ndef read_from_tokens(tokens: list) -> Exp:\n \"Read an expression from a sequence of tokens\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0)\n if token == '(':\n L = []\n while tokens[0] != ')':\n L.append(read_from_tokens(tokens))\n tokens.pop(0) # pop off ')'\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)\n\ndef atom(token: str) -> Atom:\n \"Numbers become numbers; every other token is a symbol\"\n try: return int(token)\n except ValueError:\n try: return float(token)\n except ValueError:\n return Symbol(token)\n\nprogram = \"(begin (define r 10) (* pi (* r r)))\"\nprint(eval(parse(program)))\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class AdicionarBolsaWizard(osv.TransientModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _bolsas(self, cr, uid, ids, campos, args, context=None):
oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')
res = {}
for add in self.browse(cr, uid, ids, context):
res[add.id] = add.disciplina_id.bolsas
return res
<|reserved_special_token_0|>
def default_get(self, cr, uid, fields_list, context=None):
res = super(AdicionarBolsaWizard, self).default_get(cr, uid,
fields_list, context)
res['status'] = 'n_bolsista'
res['valor_bolsa'] = 400.0
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state == 'bolsista':
raise osv.except_osv(u'Discente bolsista',
u'O discente já é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'Não é possível alterar o status de discentes inativos'
)
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id'] = doc.disciplina_id.curso_id.id
res['disciplina_id'] = doc.disciplina_id.id
res['tutor'] = doc.tutor
res['status'] = doc.state
res['doc_discente_id'] = doc.id
return res
<|reserved_special_token_0|>
def onchange_disciplina(self, cr, uid, ids, disciplina_id,
doc_discente_id, context=None):
if disciplina_id:
if doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,
uid, disciplina_id, context)
return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':
disciplina_id.bolsas}}
return {'value': {'doc_discente_id': False, 'bolsas': 0}}
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def botao_adicionar(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
for add in self.browse(cr, uid, ids, context):
if add.bolsas == 0:
raise osv.except_osv(u'Bolsas Insuficientes',
u'Não há bolsas disponíveis para essa disciplina')
elif not add.doc_discente_id.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'O discente não pode ser classificado como bolsista')
if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:
raise osv.except_osv(u'Discente bolsista',
u'O discente "{}" sob matrícula "{}" possui bolsa do tipo: "{}"'
.format(add.doc_discente_id.discente_id.name, add.
doc_discente_id.inscricao_id.perfil_id.matricula,
TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.
tipo_bolsa]))
responsavel = self.pool.get('ud.employee').search(cr,
SUPERUSER_ID, [('user_id', '=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.
inscricao_id.perfil_id.id, {'is_bolsista': True,
'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa
).replace('.', ',')})
if not add.dados_bancarios_id:
dados_bancarios = get_banco(self, cr, add, add.
doc_discente_id.discente_id.id, context)
else:
dados_bancarios = add.dados_bancarios_id.id
add.doc_discente_id.write({'state': 'bolsista',
'dados_bancarios_id': dados_bancarios})
evento = {'responsavel_id': responsavel[0], 'name':
u'Adição de bolsa: "%s"' % add.doc_discente_id.discente_id.
name, 'envolvidos_ids': [(4, add.doc_discente_id.
discente_id.id)], 'descricao':
u'Uma bolsa de R$ %s foi vinculada para o(a) discente "%s" sob matrícula "%s".'
% (('%.2f' % add.valor_bolsa).replace('.', ','), add.
doc_discente_id.discente_id.name.upper(), add.
doc_discente_id.inscricao_id.perfil_id.matricula)}
add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
class TransferirBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.transferir.wizard'
_description = u'Transferência de bolsa de monitoria (UD)'
_STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',
u'Cadastro de Reserva')]
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id_de': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id_de': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('id', '=', False)]"), 'tutor_de': fields.boolean(u'Tutor?'),
'doc_discente_id_de': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]"
), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=
True, domain="[('is_active', '=', True)]"), 'disciplina_id_para':
fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required
=True, domain=
"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]"
), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.
selection(_STATES, u'Status', required=True),
'doc_discente_id_para': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]"
), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=
'restrict'), 'agencia': fields.char(u'Agência', size=4, help=
u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',
size=2, help=u'Dígito verificador da Agência'), 'conta': fields.
char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':
fields.char(u'DV Conta', size=1, help=
u'Dígito verificador da Conta'), 'operacao': fields.char(
u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.
related('banco_id', 'agencia', type='boolean', invisible=True,
readonly=True), 'dv_agencia_v': fields.related('banco_id',
'dv_agencia', type='boolean', invisible=True, readonly=True),
'conta_v': fields.related('banco_id', 'conta', type='boolean',
invisible=True, readonly=True), 'dv_conta_v': fields.related(
'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=
True), 'operacao_v': fields.related('banco_id', 'operacao', type=
'boolean', invisible=True, readonly=True)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(TransferirBolsaWizard, self).default_get(cr, uid,
fields_list, context)
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state != 'bolsista':
raise osv.except_osv(u'Discente bolsista',
u'O discente já é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'O discente não pode ser classificado como bolsista')
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id_de'] = doc.disciplina_id.curso_id.id
res['disciplina_id_de'] = doc.disciplina_id.id
res['tutor_de'] = doc.tutor
res['status_de'] = doc.state
res['doc_discente_id_de'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {('disciplina_id_' + comp): False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}
if not disc:
res['value'] = {('disciplina_id_' + comp): False}
return res
def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,
doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
return {'value': {('doc_discente_id_' + comp): doc_discente_id}}
return {'value': {('doc_discente_id_' + comp): False}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [
'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],
context=context, load='_classic_write')
vals = {'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}
vals.update({('%s_v' % dado): banco.get(dado) for dado in banco
.keys()})
return {'value': vals}
return {'value': {'agencia_v': False, 'dv_agencia_v': False,
'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,
'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}}
def botao_transferir(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
for transf in self.browse(cr, uid, ids, context):
matricula = transf.doc_discente_id_para.discente_id.matricula
for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == 'a':
if perfil.is_bolsista:
raise osv.except_osv(u'Discente bolsista',
u'O discente "{}" sob matrícula "{}" possui bolsa do tipo: "{}"'
.format(transf.doc_discente_id_para.discente_id
.pessoa_id.name, matricula, TIPOS_BOLSA[perfil.
tipo_bolsa]))
break
if not perfil:
raise osv.except_osv(u'Perfil excluído',
u'O perfil do discente para a matrícula "%s" não existe ou foi excluído'
% matricula or '')
matricula = transf.doc_discente_id_de.discente_id.matricula
for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == 'a':
break
responsavel = self.pool.get('ud.employee').search(cr,
SUPERUSER_ID, [('user_id', '=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
valor = perfil_de.valor_bolsa
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':
True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})
perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {
'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa':
False})
transf.doc_discente_id_de.write({'state': 'n_bolsista'})
transf.doc_discente_id_para.write({'state': 'bolsista',
'is_active': True})
get_banco(self, cr, transf, transf.doc_discente_id_para.
discente_id.pessoa_id.id, context)
evento = {'responsavel_id': responsavel[0], 'name':
u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.
doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.
doc_discente_id_para.discente_id.pessoa_id.id)],
'descricao':
u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente "%(discente_para)s" sob matrícula"%(matricula_para)s".'
% {'valor': valor, 'discente_de': transf.
doc_discente_id_de.discente_id.pessoa_id.name.upper(),
'matricula_de': perfil_de.matricula, 'discente_para':
transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(
), 'matricula_para': perfil_de.matricula}}
transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
class RemoverBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.remover.wizard'
_description = u'Remoção de bolsa de discente'
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('id', '=', False)]"), 'tutor': fields.boolean(u'Tutor?'),
'doc_discente_id': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]"
)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(RemoverBolsaWizard, self).default_get(cr, uid,
fields_list, context)
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state != 'bolsista':
raise osv.except_osv(u'Discente não bolsista',
u'O discente não é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'Não é possível alterar o status de discentes inativos'
)
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id'] = doc.disciplina_id.curso_id.id
res['disciplina_id'] = doc.disciplina_id.id
res['tutor'] = doc.tutor
res['doc_discente_id'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {'disciplina_id': False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}
if not disc:
res['value'] = {'disciplina_id': False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id,
doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
return {'value': {'doc_discente_id': doc_discente_id}}
return {'value': {'doc_discente_id': False}}
def botao_remover(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
pessoa_model = self.pool.get('ud.employee')
for rem in self.browse(cr, uid, ids, context):
responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',
'=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
perfil = rem.doc_discente_id.inscricao_id.perfil_id
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':
False, 'tipo_bolsa': False, 'valor_bolsa': False})
rem.doc_discente_id.write({'state': 'n_bolsista'})
evento = {'responsavel_id': responsavel[0], 'name':
u'Remoção de bolsa: "%s"' % rem.doc_discente_id.discente_id
.name, 'envolvidos_ids': [(4, rem.doc_discente_id.
discente_id.id)], 'descricao':
u'A bolsa do discente "%s" sob matrícula "%s" foi removida.' %
(rem.doc_discente_id.discente_id.name.upper(), perfil.
matricula)}
rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdicionarBolsaWizard(osv.TransientModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _bolsas(self, cr, uid, ids, campos, args, context=None):
oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')
res = {}
for add in self.browse(cr, uid, ids, context):
res[add.id] = add.disciplina_id.bolsas
return res
<|reserved_special_token_0|>
def default_get(self, cr, uid, fields_list, context=None):
res = super(AdicionarBolsaWizard, self).default_get(cr, uid,
fields_list, context)
res['status'] = 'n_bolsista'
res['valor_bolsa'] = 400.0
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state == 'bolsista':
raise osv.except_osv(u'Discente bolsista',
u'O discente já é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'Não é possível alterar o status de discentes inativos'
)
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id'] = doc.disciplina_id.curso_id.id
res['disciplina_id'] = doc.disciplina_id.id
res['tutor'] = doc.tutor
res['status'] = doc.state
res['doc_discente_id'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {'disciplina_id': False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}
if not disc:
res['value'] = {'disciplina_id': False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id,
doc_discente_id, context=None):
if disciplina_id:
if doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,
uid, disciplina_id, context)
return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':
disciplina_id.bolsas}}
return {'value': {'doc_discente_id': False, 'bolsas': 0}}
def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,
dados_bancarios_id, context=None):
if doc_discente_id:
doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,
uid, doc_discente_id, context)
if not dados_bancarios_id:
dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id',
False)
return {'value': {'dados_bancarios_id': dados_bancarios_id},
'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.
discente_id.id)]}}
return {'value': {'dados_bancarios_id': False}, 'domain': {
'dados_bancarios_id': [('id', '=', False)]}}
<|reserved_special_token_0|>
def botao_adicionar(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
for add in self.browse(cr, uid, ids, context):
if add.bolsas == 0:
raise osv.except_osv(u'Bolsas Insuficientes',
u'Não há bolsas disponíveis para essa disciplina')
elif not add.doc_discente_id.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'O discente não pode ser classificado como bolsista')
if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:
raise osv.except_osv(u'Discente bolsista',
u'O discente "{}" sob matrícula "{}" possui bolsa do tipo: "{}"'
.format(add.doc_discente_id.discente_id.name, add.
doc_discente_id.inscricao_id.perfil_id.matricula,
TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.
tipo_bolsa]))
responsavel = self.pool.get('ud.employee').search(cr,
SUPERUSER_ID, [('user_id', '=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.
inscricao_id.perfil_id.id, {'is_bolsista': True,
'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa
).replace('.', ',')})
if not add.dados_bancarios_id:
dados_bancarios = get_banco(self, cr, add, add.
doc_discente_id.discente_id.id, context)
else:
dados_bancarios = add.dados_bancarios_id.id
add.doc_discente_id.write({'state': 'bolsista',
'dados_bancarios_id': dados_bancarios})
evento = {'responsavel_id': responsavel[0], 'name':
u'Adição de bolsa: "%s"' % add.doc_discente_id.discente_id.
name, 'envolvidos_ids': [(4, add.doc_discente_id.
discente_id.id)], 'descricao':
u'Uma bolsa de R$ %s foi vinculada para o(a) discente "%s" sob matrícula "%s".'
% (('%.2f' % add.valor_bolsa).replace('.', ','), add.
doc_discente_id.discente_id.name.upper(), add.
doc_discente_id.inscricao_id.perfil_id.matricula)}
add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
class TransferirBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.transferir.wizard'
_description = u'Transferência de bolsa de monitoria (UD)'
_STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',
u'Cadastro de Reserva')]
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id_de': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id_de': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('id', '=', False)]"), 'tutor_de': fields.boolean(u'Tutor?'),
'doc_discente_id_de': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]"
), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=
True, domain="[('is_active', '=', True)]"), 'disciplina_id_para':
fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required
=True, domain=
"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]"
), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.
selection(_STATES, u'Status', required=True),
'doc_discente_id_para': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]"
), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=
'restrict'), 'agencia': fields.char(u'Agência', size=4, help=
u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',
size=2, help=u'Dígito verificador da Agência'), 'conta': fields.
char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':
fields.char(u'DV Conta', size=1, help=
u'Dígito verificador da Conta'), 'operacao': fields.char(
u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.
related('banco_id', 'agencia', type='boolean', invisible=True,
readonly=True), 'dv_agencia_v': fields.related('banco_id',
'dv_agencia', type='boolean', invisible=True, readonly=True),
'conta_v': fields.related('banco_id', 'conta', type='boolean',
invisible=True, readonly=True), 'dv_conta_v': fields.related(
'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=
True), 'operacao_v': fields.related('banco_id', 'operacao', type=
'boolean', invisible=True, readonly=True)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(TransferirBolsaWizard, self).default_get(cr, uid,
fields_list, context)
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state != 'bolsista':
raise osv.except_osv(u'Discente bolsista',
u'O discente já é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'O discente não pode ser classificado como bolsista')
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id_de'] = doc.disciplina_id.curso_id.id
res['disciplina_id_de'] = doc.disciplina_id.id
res['tutor_de'] = doc.tutor
res['status_de'] = doc.state
res['doc_discente_id_de'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {('disciplina_id_' + comp): False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}
if not disc:
res['value'] = {('disciplina_id_' + comp): False}
return res
def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,
doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
return {'value': {('doc_discente_id_' + comp): doc_discente_id}}
return {'value': {('doc_discente_id_' + comp): False}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [
'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],
context=context, load='_classic_write')
vals = {'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}
vals.update({('%s_v' % dado): banco.get(dado) for dado in banco
.keys()})
return {'value': vals}
return {'value': {'agencia_v': False, 'dv_agencia_v': False,
'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,
'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}}
def botao_transferir(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
for transf in self.browse(cr, uid, ids, context):
matricula = transf.doc_discente_id_para.discente_id.matricula
for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == 'a':
if perfil.is_bolsista:
raise osv.except_osv(u'Discente bolsista',
u'O discente "{}" sob matrícula "{}" possui bolsa do tipo: "{}"'
.format(transf.doc_discente_id_para.discente_id
.pessoa_id.name, matricula, TIPOS_BOLSA[perfil.
tipo_bolsa]))
break
if not perfil:
raise osv.except_osv(u'Perfil excluído',
u'O perfil do discente para a matrícula "%s" não existe ou foi excluído'
% matricula or '')
matricula = transf.doc_discente_id_de.discente_id.matricula
for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == 'a':
break
responsavel = self.pool.get('ud.employee').search(cr,
SUPERUSER_ID, [('user_id', '=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
valor = perfil_de.valor_bolsa
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':
True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})
perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {
'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa':
False})
transf.doc_discente_id_de.write({'state': 'n_bolsista'})
transf.doc_discente_id_para.write({'state': 'bolsista',
'is_active': True})
get_banco(self, cr, transf, transf.doc_discente_id_para.
discente_id.pessoa_id.id, context)
evento = {'responsavel_id': responsavel[0], 'name':
u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.
doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.
doc_discente_id_para.discente_id.pessoa_id.id)],
'descricao':
u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente "%(discente_para)s" sob matrícula"%(matricula_para)s".'
% {'valor': valor, 'discente_de': transf.
doc_discente_id_de.discente_id.pessoa_id.name.upper(),
'matricula_de': perfil_de.matricula, 'discente_para':
transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(
), 'matricula_para': perfil_de.matricula}}
transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
class RemoverBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.remover.wizard'
_description = u'Remoção de bolsa de discente'
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('id', '=', False)]"), 'tutor': fields.boolean(u'Tutor?'),
'doc_discente_id': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]"
)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(RemoverBolsaWizard, self).default_get(cr, uid,
fields_list, context)
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state != 'bolsista':
raise osv.except_osv(u'Discente não bolsista',
u'O discente não é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'Não é possível alterar o status de discentes inativos'
)
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id'] = doc.disciplina_id.curso_id.id
res['disciplina_id'] = doc.disciplina_id.id
res['tutor'] = doc.tutor
res['doc_discente_id'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {'disciplina_id': False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}
if not disc:
res['value'] = {'disciplina_id': False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id,
doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
return {'value': {'doc_discente_id': doc_discente_id}}
return {'value': {'doc_discente_id': False}}
def botao_remover(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
pessoa_model = self.pool.get('ud.employee')
for rem in self.browse(cr, uid, ids, context):
responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',
'=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
perfil = rem.doc_discente_id.inscricao_id.perfil_id
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':
False, 'tipo_bolsa': False, 'valor_bolsa': False})
rem.doc_discente_id.write({'state': 'n_bolsista'})
evento = {'responsavel_id': responsavel[0], 'name':
u'Remoção de bolsa: "%s"' % rem.doc_discente_id.discente_id
.name, 'envolvidos_ids': [(4, rem.doc_discente_id.
discente_id.id)], 'descricao':
u'A bolsa do discente "%s" sob matrícula "%s" foi removida.' %
(rem.doc_discente_id.discente_id.name.upper(), perfil.
matricula)}
rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdicionarBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.adicionar.wizard'
_description = u'Inclusão de bolsa de monitoria para discente (UD)'
_STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',
u'Cadastro de Reserva')]
def _bolsas(self, cr, uid, ids, campos, args, context=None):
oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')
res = {}
for add in self.browse(cr, uid, ids, context):
res[add.id] = add.disciplina_id.bolsas
return res
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), ('is_active', '=', True)]"
), 'bolsas': fields.function(_bolsas, type='integer', string=
u'Bolsas disponíveis', help=
u'Número de bolsas disponíveis para a disciplina'), 'valor_bolsa':
fields.float(u'Bolsa (R$)'), 'tutor': fields.boolean(u'Tutor?'),
'status': fields.selection(_STATES, u'Status', required=True),
'doc_discente_id': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', status)]"
), 'dados_bancarios_id': fields.many2one('ud.dados.bancarios',
u'Dados Bancários', domain=[('id', '=', False)]), 'banco_id':
fields.many2one('ud.banco', u'Banco', ondelete='restrict'),
'agencia': fields.char(u'Agência', size=4, help=
u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',
size=2, help=u'Dígito verificador da Agência'), 'conta': fields.
char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':
fields.char(u'DV Conta', size=1, help=
u'Dígito verificador da Conta'), 'operacao': fields.char(
u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.
related('banco_id', 'agencia', type='boolean', invisible=True,
readonly=True), 'dv_agencia_v': fields.related('banco_id',
'dv_agencia', type='boolean', invisible=True, readonly=True),
'conta_v': fields.related('banco_id', 'conta', type='boolean',
invisible=True, readonly=True), 'dv_conta_v': fields.related(
'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=
True), 'operacao_v': fields.related('banco_id', 'operacao', type=
'boolean', invisible=True, readonly=True)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(AdicionarBolsaWizard, self).default_get(cr, uid,
fields_list, context)
res['status'] = 'n_bolsista'
res['valor_bolsa'] = 400.0
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state == 'bolsista':
raise osv.except_osv(u'Discente bolsista',
u'O discente já é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'Não é possível alterar o status de discentes inativos'
)
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id'] = doc.disciplina_id.curso_id.id
res['disciplina_id'] = doc.disciplina_id.id
res['tutor'] = doc.tutor
res['status'] = doc.state
res['doc_discente_id'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {'disciplina_id': False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}
if not disc:
res['value'] = {'disciplina_id': False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id,
doc_discente_id, context=None):
if disciplina_id:
if doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,
uid, disciplina_id, context)
return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':
disciplina_id.bolsas}}
return {'value': {'doc_discente_id': False, 'bolsas': 0}}
def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,
dados_bancarios_id, context=None):
if doc_discente_id:
doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,
uid, doc_discente_id, context)
if not dados_bancarios_id:
dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id',
False)
return {'value': {'dados_bancarios_id': dados_bancarios_id},
'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.
discente_id.id)]}}
return {'value': {'dados_bancarios_id': False}, 'domain': {
'dados_bancarios_id': [('id', '=', False)]}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [
'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],
context=context, load='_classic_write')
vals = {'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}
vals.update({('%s_v' % dado): banco.get(dado) for dado in banco
.keys()})
return {'value': vals}
return {'value': {'agencia_v': False, 'dv_agencia_v': False,
'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,
'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}}
def botao_adicionar(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
for add in self.browse(cr, uid, ids, context):
if add.bolsas == 0:
raise osv.except_osv(u'Bolsas Insuficientes',
u'Não há bolsas disponíveis para essa disciplina')
elif not add.doc_discente_id.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'O discente não pode ser classificado como bolsista')
if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:
raise osv.except_osv(u'Discente bolsista',
u'O discente "{}" sob matrícula "{}" possui bolsa do tipo: "{}"'
.format(add.doc_discente_id.discente_id.name, add.
doc_discente_id.inscricao_id.perfil_id.matricula,
TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.
tipo_bolsa]))
responsavel = self.pool.get('ud.employee').search(cr,
SUPERUSER_ID, [('user_id', '=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.
inscricao_id.perfil_id.id, {'is_bolsista': True,
'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa
).replace('.', ',')})
if not add.dados_bancarios_id:
dados_bancarios = get_banco(self, cr, add, add.
doc_discente_id.discente_id.id, context)
else:
dados_bancarios = add.dados_bancarios_id.id
add.doc_discente_id.write({'state': 'bolsista',
'dados_bancarios_id': dados_bancarios})
evento = {'responsavel_id': responsavel[0], 'name':
u'Adição de bolsa: "%s"' % add.doc_discente_id.discente_id.
name, 'envolvidos_ids': [(4, add.doc_discente_id.
discente_id.id)], 'descricao':
u'Uma bolsa de R$ %s foi vinculada para o(a) discente "%s" sob matrícula "%s".'
% (('%.2f' % add.valor_bolsa).replace('.', ','), add.
doc_discente_id.discente_id.name.upper(), add.
doc_discente_id.inscricao_id.perfil_id.matricula)}
add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
class TransferirBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.transferir.wizard'
_description = u'Transferência de bolsa de monitoria (UD)'
_STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',
u'Cadastro de Reserva')]
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id_de': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id_de': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('id', '=', False)]"), 'tutor_de': fields.boolean(u'Tutor?'),
'doc_discente_id_de': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]"
), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=
True, domain="[('is_active', '=', True)]"), 'disciplina_id_para':
fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required
=True, domain=
"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]"
), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.
selection(_STATES, u'Status', required=True),
'doc_discente_id_para': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]"
), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=
'restrict'), 'agencia': fields.char(u'Agência', size=4, help=
u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',
size=2, help=u'Dígito verificador da Agência'), 'conta': fields.
char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':
fields.char(u'DV Conta', size=1, help=
u'Dígito verificador da Conta'), 'operacao': fields.char(
u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.
related('banco_id', 'agencia', type='boolean', invisible=True,
readonly=True), 'dv_agencia_v': fields.related('banco_id',
'dv_agencia', type='boolean', invisible=True, readonly=True),
'conta_v': fields.related('banco_id', 'conta', type='boolean',
invisible=True, readonly=True), 'dv_conta_v': fields.related(
'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=
True), 'operacao_v': fields.related('banco_id', 'operacao', type=
'boolean', invisible=True, readonly=True)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(TransferirBolsaWizard, self).default_get(cr, uid,
fields_list, context)
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state != 'bolsista':
raise osv.except_osv(u'Discente bolsista',
u'O discente já é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'O discente não pode ser classificado como bolsista')
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id_de'] = doc.disciplina_id.curso_id.id
res['disciplina_id_de'] = doc.disciplina_id.id
res['tutor_de'] = doc.tutor
res['status_de'] = doc.state
res['doc_discente_id_de'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {('disciplina_id_' + comp): False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}
if not disc:
res['value'] = {('disciplina_id_' + comp): False}
return res
def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,
doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
return {'value': {('doc_discente_id_' + comp): doc_discente_id}}
return {'value': {('doc_discente_id_' + comp): False}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [
'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],
context=context, load='_classic_write')
vals = {'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}
vals.update({('%s_v' % dado): banco.get(dado) for dado in banco
.keys()})
return {'value': vals}
return {'value': {'agencia_v': False, 'dv_agencia_v': False,
'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,
'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}}
def botao_transferir(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
for transf in self.browse(cr, uid, ids, context):
matricula = transf.doc_discente_id_para.discente_id.matricula
for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == 'a':
if perfil.is_bolsista:
raise osv.except_osv(u'Discente bolsista',
u'O discente "{}" sob matrícula "{}" possui bolsa do tipo: "{}"'
.format(transf.doc_discente_id_para.discente_id
.pessoa_id.name, matricula, TIPOS_BOLSA[perfil.
tipo_bolsa]))
                    break
            else:
                perfil = None
            if not perfil:
                raise osv.except_osv(u'Perfil excluído',
                    u'O perfil do discente para a matrícula "%s" não existe ou foi excluído'
                    % matricula)
matricula = transf.doc_discente_id_de.discente_id.matricula
for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:
                if perfil_de.matricula == matricula and perfil_de.tipo == 'a':
break
responsavel = self.pool.get('ud.employee').search(cr,
SUPERUSER_ID, [('user_id', '=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
valor = perfil_de.valor_bolsa
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':
True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})
perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {
'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa':
False})
transf.doc_discente_id_de.write({'state': 'n_bolsista'})
transf.doc_discente_id_para.write({'state': 'bolsista',
'is_active': True})
get_banco(self, cr, transf, transf.doc_discente_id_para.
discente_id.pessoa_id.id, context)
evento = {'responsavel_id': responsavel[0], 'name':
u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.
doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.
doc_discente_id_para.discente_id.pessoa_id.id)],
'descricao':
                u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente "%(discente_para)s" sob matrícula "%(matricula_para)s".'
% {'valor': valor, 'discente_de': transf.
doc_discente_id_de.discente_id.pessoa_id.name.upper(),
                'matricula_de': perfil_de.matricula, 'discente_para':
                transf.doc_discente_id_para.discente_id.pessoa_id.name.upper(
                ), 'matricula_para': perfil.matricula}}
transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
class RemoverBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.remover.wizard'
_description = u'Remoção de bolsa de discente'
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('id', '=', False)]"), 'tutor': fields.boolean(u'Tutor?'),
'doc_discente_id': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]"
)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(RemoverBolsaWizard, self).default_get(cr, uid,
fields_list, context)
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state != 'bolsista':
raise osv.except_osv(u'Discente não bolsista',
u'O discente não é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'Não é possível alterar o status de discentes inativos'
)
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id'] = doc.disciplina_id.curso_id.id
res['disciplina_id'] = doc.disciplina_id.id
res['tutor'] = doc.tutor
res['doc_discente_id'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {'disciplina_id': False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}
if not disc:
res['value'] = {'disciplina_id': False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id,
doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
return {'value': {'doc_discente_id': doc_discente_id}}
return {'value': {'doc_discente_id': False}}
def botao_remover(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
pessoa_model = self.pool.get('ud.employee')
for rem in self.browse(cr, uid, ids, context):
responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',
'=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
perfil = rem.doc_discente_id.inscricao_id.perfil_id
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':
False, 'tipo_bolsa': False, 'valor_bolsa': False})
rem.doc_discente_id.write({'state': 'n_bolsista'})
evento = {'responsavel_id': responsavel[0], 'name':
u'Remoção de bolsa: "%s"' % rem.doc_discente_id.discente_id
.name, 'envolvidos_ids': [(4, rem.doc_discente_id.
discente_id.id)], 'descricao':
u'A bolsa do discente "%s" sob matrícula "%s" foi removida.' %
(rem.doc_discente_id.discente_id.name.upper(), perfil.
matricula)}
rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
|
<mask token>
TIPOS_BOLSA = dict(_TIPOS_BOLSA)
def get_banco(cls, cr, browse_record, usuario_id, context=None):
dados_bancarios_model = cls.pool.get('ud.dados.bancarios')
args = [('banco_id', '=', browse_record.banco_id.id)]
if browse_record.agencia_v:
args.append(('agencia', '=', browse_record.agencia))
if browse_record.dv_agencia_v:
args.append(('dv_agencia', '=', browse_record.dv_agencia))
if browse_record.conta_v:
args.append(('conta', '=', browse_record.conta))
if browse_record.dv_conta_v:
args.append(('dv_conta', '=', browse_record.dv_conta))
if browse_record.operacao_v:
args.append(('operacao', '=', browse_record.operacao))
dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args,
context=context)
if dados_bancarios:
dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID,
dados_bancarios[0])
if not dados_bancarios.ud_conta_id:
return dados_bancarios.id
elif dados_bancarios.ud_conta_id.id == usuario_id:
return dados_bancarios.id
raise osv.except_osv(u'Dados Bancários duplicados',
u'Outra pessoa já possui esses dados bancários!')
dados = {'banco_id': browse_record.banco_id.id, 'agencia':
browse_record.agencia, 'dv_agencia': browse_record.dv_agencia,
'conta': browse_record.conta, 'dv_conta': browse_record.dv_conta,
'operacao': browse_record.operacao, 'ud_conta_id': usuario_id}
return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=
context)
class AdicionarBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.adicionar.wizard'
_description = u'Inclusão de bolsa de monitoria para discente (UD)'
_STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',
u'Cadastro de Reserva')]
def _bolsas(self, cr, uid, ids, campos, args, context=None):
res = {}
for add in self.browse(cr, uid, ids, context):
res[add.id] = add.disciplina_id.bolsas
return res
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), ('is_active', '=', True)]"
), 'bolsas': fields.function(_bolsas, type='integer', string=
u'Bolsas disponíveis', help=
u'Número de bolsas disponíveis para a disciplina'), 'valor_bolsa':
fields.float(u'Bolsa (R$)'), 'tutor': fields.boolean(u'Tutor?'),
'status': fields.selection(_STATES, u'Status', required=True),
'doc_discente_id': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', status)]"
), 'dados_bancarios_id': fields.many2one('ud.dados.bancarios',
u'Dados Bancários', domain=[('id', '=', False)]), 'banco_id':
fields.many2one('ud.banco', u'Banco', ondelete='restrict'),
'agencia': fields.char(u'Agência', size=4, help=
u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',
size=2, help=u'Dígito verificador da Agência'), 'conta': fields.
char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':
fields.char(u'DV Conta', size=1, help=
u'Dígito verificador da Conta'), 'operacao': fields.char(
u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.
related('banco_id', 'agencia', type='boolean', invisible=True,
readonly=True), 'dv_agencia_v': fields.related('banco_id',
'dv_agencia', type='boolean', invisible=True, readonly=True),
'conta_v': fields.related('banco_id', 'conta', type='boolean',
invisible=True, readonly=True), 'dv_conta_v': fields.related(
'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=
True), 'operacao_v': fields.related('banco_id', 'operacao', type=
'boolean', invisible=True, readonly=True)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(AdicionarBolsaWizard, self).default_get(cr, uid,
fields_list, context)
res['status'] = 'n_bolsista'
res['valor_bolsa'] = 400.0
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state == 'bolsista':
raise osv.except_osv(u'Discente bolsista',
u'O discente já é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'Não é possível alterar o status de discentes inativos'
)
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id'] = doc.disciplina_id.curso_id.id
res['disciplina_id'] = doc.disciplina_id.id
res['tutor'] = doc.tutor
res['status'] = doc.state
res['doc_discente_id'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {'disciplina_id': False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}
if not disc:
res['value'] = {'disciplina_id': False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id,
doc_discente_id, context=None):
if disciplina_id:
if doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,
uid, disciplina_id, context)
return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':
disciplina_id.bolsas}}
return {'value': {'doc_discente_id': False, 'bolsas': 0}}
def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,
dados_bancarios_id, context=None):
if doc_discente_id:
doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,
uid, doc_discente_id, context)
if not dados_bancarios_id:
dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id',
False)
return {'value': {'dados_bancarios_id': dados_bancarios_id},
'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.
discente_id.id)]}}
return {'value': {'dados_bancarios_id': False}, 'domain': {
'dados_bancarios_id': [('id', '=', False)]}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [
'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],
context=context, load='_classic_write')
vals = {'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}
vals.update({('%s_v' % dado): banco.get(dado) for dado in banco
.keys()})
return {'value': vals}
return {'value': {'agencia_v': False, 'dv_agencia_v': False,
'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,
'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}}
def botao_adicionar(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
for add in self.browse(cr, uid, ids, context):
if add.bolsas == 0:
raise osv.except_osv(u'Bolsas Insuficientes',
u'Não há bolsas disponíveis para essa disciplina')
elif not add.doc_discente_id.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'O discente não pode ser classificado como bolsista')
if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:
raise osv.except_osv(u'Discente bolsista',
u'O discente "{}" sob matrícula "{}" possui bolsa do tipo: "{}"'
.format(add.doc_discente_id.discente_id.name, add.
doc_discente_id.inscricao_id.perfil_id.matricula,
TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.
tipo_bolsa]))
responsavel = self.pool.get('ud.employee').search(cr,
SUPERUSER_ID, [('user_id', '=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.
inscricao_id.perfil_id.id, {'is_bolsista': True,
'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa
).replace('.', ',')})
if not add.dados_bancarios_id:
dados_bancarios = get_banco(self, cr, add, add.
doc_discente_id.discente_id.id, context)
else:
dados_bancarios = add.dados_bancarios_id.id
add.doc_discente_id.write({'state': 'bolsista',
'dados_bancarios_id': dados_bancarios})
evento = {'responsavel_id': responsavel[0], 'name':
u'Adição de bolsa: "%s"' % add.doc_discente_id.discente_id.
name, 'envolvidos_ids': [(4, add.doc_discente_id.
discente_id.id)], 'descricao':
u'Uma bolsa de R$ %s foi vinculada para o(a) discente "%s" sob matrícula "%s".'
% (('%.2f' % add.valor_bolsa).replace('.', ','), add.
doc_discente_id.discente_id.name.upper(), add.
doc_discente_id.inscricao_id.perfil_id.matricula)}
add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
class TransferirBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.transferir.wizard'
_description = u'Transferência de bolsa de monitoria (UD)'
_STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',
u'Cadastro de Reserva')]
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id_de': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id_de': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('id', '=', False)]"), 'tutor_de': fields.boolean(u'Tutor?'),
'doc_discente_id_de': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]"
), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=
True, domain="[('is_active', '=', True)]"), 'disciplina_id_para':
fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required
=True, domain=
"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]"
), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.
selection(_STATES, u'Status', required=True),
'doc_discente_id_para': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]"
), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=
'restrict'), 'agencia': fields.char(u'Agência', size=4, help=
u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',
size=2, help=u'Dígito verificador da Agência'), 'conta': fields.
char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':
fields.char(u'DV Conta', size=1, help=
u'Dígito verificador da Conta'), 'operacao': fields.char(
u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.
related('banco_id', 'agencia', type='boolean', invisible=True,
readonly=True), 'dv_agencia_v': fields.related('banco_id',
'dv_agencia', type='boolean', invisible=True, readonly=True),
'conta_v': fields.related('banco_id', 'conta', type='boolean',
invisible=True, readonly=True), 'dv_conta_v': fields.related(
'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=
True), 'operacao_v': fields.related('banco_id', 'operacao', type=
'boolean', invisible=True, readonly=True)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(TransferirBolsaWizard, self).default_get(cr, uid,
fields_list, context)
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state != 'bolsista':
raise osv.except_osv(u'Discente bolsista',
u'O discente já é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'O discente não pode ser classificado como bolsista')
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id_de'] = doc.disciplina_id.curso_id.id
res['disciplina_id_de'] = doc.disciplina_id.id
res['tutor_de'] = doc.tutor
res['status_de'] = doc.state
res['doc_discente_id_de'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {('disciplina_id_' + comp): False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}
if not disc:
res['value'] = {('disciplina_id_' + comp): False}
return res
def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,
doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
return {'value': {('doc_discente_id_' + comp): doc_discente_id}}
return {'value': {('doc_discente_id_' + comp): False}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [
'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],
context=context, load='_classic_write')
vals = {'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}
vals.update({('%s_v' % dado): banco.get(dado) for dado in banco
.keys()})
return {'value': vals}
return {'value': {'agencia_v': False, 'dv_agencia_v': False,
'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,
'agencia': False, 'dv_agencia': False, 'conta': False,
'dv_conta': False, 'operacao': False}}
def botao_transferir(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
for transf in self.browse(cr, uid, ids, context):
matricula = transf.doc_discente_id_para.discente_id.matricula
for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == 'a':
if perfil.is_bolsista:
raise osv.except_osv(u'Discente bolsista',
u'O discente "{}" sob matrícula "{}" possui bolsa do tipo: "{}"'
.format(transf.doc_discente_id_para.discente_id
.pessoa_id.name, matricula, TIPOS_BOLSA[perfil.
tipo_bolsa]))
                    break
            else:
                perfil = None
            if not perfil:
                raise osv.except_osv(u'Perfil excluído',
                    u'O perfil do discente para a matrícula "%s" não existe ou foi excluído'
                    % matricula)
matricula = transf.doc_discente_id_de.discente_id.matricula
for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:
                if perfil_de.matricula == matricula and perfil_de.tipo == 'a':
break
responsavel = self.pool.get('ud.employee').search(cr,
SUPERUSER_ID, [('user_id', '=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
valor = perfil_de.valor_bolsa
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':
True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})
perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {
'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa':
False})
transf.doc_discente_id_de.write({'state': 'n_bolsista'})
transf.doc_discente_id_para.write({'state': 'bolsista',
'is_active': True})
get_banco(self, cr, transf, transf.doc_discente_id_para.
discente_id.pessoa_id.id, context)
evento = {'responsavel_id': responsavel[0], 'name':
u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.
doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.
doc_discente_id_para.discente_id.pessoa_id.id)],
'descricao':
                u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente "%(discente_para)s" sob matrícula "%(matricula_para)s".'
% {'valor': valor, 'discente_de': transf.
doc_discente_id_de.discente_id.pessoa_id.name.upper(),
                'matricula_de': perfil_de.matricula, 'discente_para':
                transf.doc_discente_id_para.discente_id.pessoa_id.name.upper(
                ), 'matricula_para': perfil.matricula}}
transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
class RemoverBolsaWizard(osv.TransientModel):
_name = 'ud.monitoria.bolsa.remover.wizard'
_description = u'Remoção de bolsa de discente'
_columns = {'semestre_id': fields.many2one('ud.monitoria.registro',
u'Semestre', required=True, readonly=True), 'curso_id': fields.
many2one('ud.curso', u'Curso', required=True, domain=
"[('is_active', '=', True)]"), 'disciplina_id': fields.many2one(
'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=
"[('id', '=', False)]"), 'tutor': fields.boolean(u'Tutor?'),
'doc_discente_id': fields.many2one(
'ud.monitoria.documentos.discente', u'Discente', required=True,
domain=
"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]"
)}
def default_get(self, cr, uid, fields_list, context=None):
res = super(RemoverBolsaWizard, self).default_get(cr, uid,
fields_list, context)
context = context or {}
if context.get('active_id', False):
if context.get('active_model', False) == 'ud.monitoria.registro':
res['semestre_id'] = context.get('active_id')
elif context.get('active_model', False
) == 'ud.monitoria.documentos.discente':
doc = self.pool.get('ud.monitoria.documentos.discente').browse(
cr, uid, context.get('active_id'), context)
if doc.state != 'bolsista':
raise osv.except_osv(u'Discente não bolsista',
u'O discente não é bolsista')
elif not doc.is_active:
raise osv.except_osv(u'Documento do discente inativo',
u'Não é possível alterar o status de discentes inativos'
)
res['semestre_id'] = doc.disciplina_id.semestre_id.id
res['curso_id'] = doc.disciplina_id.curso_id.id
res['disciplina_id'] = doc.disciplina_id.id
res['tutor'] = doc.tutor
res['doc_discente_id'] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,
disciplina_id, context=None):
if not (semestre_id and curso_id):
return {'value': {'disciplina_id': False}}
reg = self.pool.get('ud.monitoria.registro').read(cr, uid,
semestre_id, ['processos_seletivos_ids'], context=context, load
='_classic_write')
args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',
reg['processos_seletivos_ids']), ('is_active', '=', True)]
disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,
args, context=context)
res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}
if not disc:
res['value'] = {'disciplina_id': False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id,
doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get('ud.monitoria.documentos.discente'
).browse(cr, uid, doc_discente_id, context)
doc_discente_id = (doc_discente_id if doc_discente.
disciplina_id.id == disciplina_id else False)
return {'value': {'doc_discente_id': doc_discente_id}}
return {'value': {'doc_discente_id': False}}
def botao_remover(self, cr, uid, ids, context=None):
perfil_model = self.pool.get('ud.perfil')
pessoa_model = self.pool.get('ud.employee')
for rem in self.browse(cr, uid, ids, context):
responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',
'=', uid)], limit=2)
if not responsavel:
raise osv.except_osv(u'Registro Inexistente',
u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'
)
if len(responsavel) > 1:
raise osv.except_osv(u'Multiplos vínculos',
u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'
)
perfil = rem.doc_discente_id.inscricao_id.perfil_id
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':
False, 'tipo_bolsa': False, 'valor_bolsa': False})
rem.doc_discente_id.write({'state': 'n_bolsista'})
evento = {'responsavel_id': responsavel[0], 'name':
u'Remoção de bolsa: "%s"' % rem.doc_discente_id.discente_id
.name, 'envolvidos_ids': [(4, rem.doc_discente_id.
discente_id.id)], 'descricao':
u'A bolsa do discente "%s" sob matrícula "%s" foi removida.' %
(rem.doc_discente_id.discente_id.name.upper(), perfil.
matricula)}
rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})
return True
|
# coding: utf-8
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.addons.ud.ud import _TIPOS_BOLSA
TIPOS_BOLSA = dict(_TIPOS_BOLSA)
def get_banco(cls, cr, browse_record, usuario_id, context=None):
dados_bancarios_model = cls.pool.get("ud.dados.bancarios")
args = [("banco_id", "=", browse_record.banco_id.id)]
if browse_record.agencia_v:
args.append(("agencia", "=", browse_record.agencia))
if browse_record.dv_agencia_v:
args.append(("dv_agencia", "=", browse_record.dv_agencia))
if browse_record.conta_v:
args.append(("conta", "=", browse_record.conta))
if browse_record.dv_conta_v:
args.append(("dv_conta", "=", browse_record.dv_conta))
if browse_record.operacao_v:
args.append(("operacao", "=", browse_record.operacao))
dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args, context=context)
if dados_bancarios:
dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID, dados_bancarios[0])
if not dados_bancarios.ud_conta_id:
return dados_bancarios.id
elif dados_bancarios.ud_conta_id.id == usuario_id:
return dados_bancarios.id
raise osv.except_osv(u"Dados Bancários duplicados", u"Outra pessoa já possui esses dados bancários!")
dados = {"banco_id": browse_record.banco_id.id, "agencia": browse_record.agencia, "dv_agencia": browse_record.dv_agencia,
"conta": browse_record.conta, "dv_conta": browse_record.dv_conta, "operacao": browse_record.operacao,
"ud_conta_id": usuario_id}
return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=context)
class AdicionarBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.adicionar.wizard"
_description = u"Inclusão de bolsa de monitoria para discente (UD)"
_STATES = [
("n_bolsista", u"Não Bolsista"),
("reserva", u"Cadastro de Reserva"),
]
def _bolsas(self, cr, uid, ids, campos, args, context=None):
res = {}
for add in self.browse(cr, uid, ids, context):
res[add.id] = add.disciplina_id.bolsas
return res
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), "
"('is_active', '=', True)]"),
"bolsas": fields.function(_bolsas, type="integer", string=u"Bolsas disponíveis",
help=u"Número de bolsas disponíveis para a disciplina"),
"valor_bolsa": fields.float(u"Bolsa (R$)"),
"tutor": fields.boolean(u"Tutor?"),
"status": fields.selection(_STATES, u"Status", required=True),
"doc_discente_id": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), "
"('is_active', '=', True), ('state', '=', status)]"),
# DADOS BANCÁRIOS
"dados_bancarios_id": fields.many2one("ud.dados.bancarios", u"Dados Bancários", domain=[('id', '=', False)]),
"banco_id": fields.many2one("ud.banco", u"Banco", ondelete="restrict"),
"agencia": fields.char(u"Agência", size=4, help=u"Número da Agência"),
"dv_agencia": fields.char(u"DV Agência", size=2, help=u"Dígito verificador da Agência"),
"conta": fields.char(u"Conta", size=10, help=u"Número da Conta"),
"dv_conta": fields.char(u"DV Conta", size=1, help=u"Dígito verificador da Conta"),
"operacao": fields.char(u"Operação", size=3, help=u"Tipo de conta"),
"agencia_v": fields.related("banco_id", "agencia", type="boolean", invisible=True, readonly=True),
"dv_agencia_v": fields.related("banco_id", "dv_agencia", type="boolean", invisible=True, readonly=True),
"conta_v": fields.related("banco_id", "conta", type="boolean", invisible=True, readonly=True),
"dv_conta_v": fields.related("banco_id", "dv_conta", type="boolean", invisible=True, readonly=True),
"operacao_v": fields.related("banco_id", "operacao", type="boolean", invisible=True, readonly=True),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(AdicionarBolsaWizard, self).default_get(cr, uid, fields_list, context)
res["status"] = "n_bolsista"
res["valor_bolsa"] = 400.
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"), context)
if doc.state == "bolsista":
raise osv.except_osv(u"Discente bolsista", u"O discente já é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo", u"Não é possível alterar o status de discentes inativos")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id"] = doc.disciplina_id.curso_id.id
res["disciplina_id"] = doc.disciplina_id.id
res["tutor"] = doc.tutor
res["status"] = doc.state
res["doc_discente_id"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id": False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id": [("id", "in", disc)]}}
if not disc:
res["value"]= {"disciplina_id": False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):
if disciplina_id:
if doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
disciplina_id = self.pool.get("ud.monitoria.disciplina").browse(cr, uid, disciplina_id, context)
return {
"value": {"doc_discente_id": doc_discente_id,
"bolsas": disciplina_id.bolsas}
}
return {"value": {"doc_discente_id": False, "bolsas": 0}}
def onchange_doc_discente(self, cr, uid, ids, doc_discente_id, dados_bancarios_id, context=None):
if doc_discente_id:
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
if not dados_bancarios_id:
dados_bancarios_id = getattr(doc.dados_bancarios_id, "id", False)
return {"value": {"dados_bancarios_id": dados_bancarios_id},
"domain": {"dados_bancarios_id": [("ud_conta_id", "=", doc.discente_id.id)]}}
return {"value": {"dados_bancarios_id": False},
"domain": {"dados_bancarios_id": [("id", "=", False)]}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get("ud.banco").read(cr, uid, banco_id, [
"agencia", "dv_agencia", "conta", "dv_conta", "operacao"
], context=context, load="_classic_write")
vals = {"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}
vals.update({"%s_v" % dado: banco.get(dado) for dado in banco.keys()})
return {"value": vals}
return {"value": {"agencia_v": False, "dv_agencia_v": False, "conta_v": False, "dv_conta_v": False,"operacao_v": False,
"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}}
def botao_adicionar(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
for add in self.browse(cr, uid, ids, context):
if add.bolsas == 0:
raise osv.except_osv(u"Bolsas Insuficientes", u"Não há bolsas disponíveis para essa disciplina")
elif not add.doc_discente_id.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"O discente não pode ser classificado como bolsista")
if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:
raise osv.except_osv(
u"Discente bolsista",
u"O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"".format(
add.doc_discente_id.discente_id.name, add.doc_discente_id.inscricao_id.perfil_id.matricula,
TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.tipo_bolsa]
)
)
responsavel = self.pool.get("ud.employee").search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.inscricao_id.perfil_id.id, {
"is_bolsista": True, "tipo_bolsa": "m", "valor_bolsa": ("%.2f" % add.valor_bolsa).replace(".", ",")
})
if not add.dados_bancarios_id:
dados_bancarios = get_banco(self, cr, add, add.doc_discente_id.discente_id.id, context)
else:
dados_bancarios = add.dados_bancarios_id.id
add.doc_discente_id.write({"state": "bolsista", "dados_bancarios_id": dados_bancarios})
evento = {
"responsavel_id": responsavel[0],
"name": u"Adição de bolsa: \"%s\"" % add.doc_discente_id.discente_id.name,
"envolvidos_ids": [(4, add.doc_discente_id.discente_id.id)],
"descricao": u"Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\"." % (
("%.2f" % add.valor_bolsa).replace(".", ","),
add.doc_discente_id.discente_id.name.upper(), add.doc_discente_id.inscricao_id.perfil_id.matricula
)
}
add.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
class TransferirBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.transferir.wizard"
_description = u"Transferência de bolsa de monitoria (UD)"
_STATES = [
("n_bolsista", u"Não Bolsista"),
("reserva", u"Cadastro de Reserva"),
]
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id_de": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id_de": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('id', '=', False)]"),
"tutor_de": fields.boolean(u"Tutor?"),
"doc_discente_id_de": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('is_active', '=', True), ('state', '=', 'bolsista'), "
"('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]"),
"curso_id_para": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id_para": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), "
"('is_active', '=', True)]"),
"tutor_para": fields.boolean(u"Tutor?"),
"status_para": fields.selection(_STATES, u"Status", required=True),
"doc_discente_id_para": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('is_active', '=', True), ('state', '=', status_para), "
"('disciplina_id', '=', disciplina_id_para), "
"('tutor', '=', tutor_para)]"),
# DADOS BANCÁRIOS
"banco_id": fields.many2one("ud.banco", u"Banco", ondelete="restrict"),
"agencia": fields.char(u"Agência", size=4, help=u"Número da Agência"),
"dv_agencia": fields.char(u"DV Agência", size=2, help=u"Dígito verificador da Agência"),
"conta": fields.char(u"Conta", size=10, help=u"Número da Conta"),
"dv_conta": fields.char(u"DV Conta", size=1, help=u"Dígito verificador da Conta"),
"operacao": fields.char(u"Operação", size=3, help=u"Tipo de conta"),
"agencia_v": fields.related("banco_id", "agencia", type="boolean", invisible=True, readonly=True),
"dv_agencia_v": fields.related("banco_id", "dv_agencia", type="boolean", invisible=True, readonly=True),
"conta_v": fields.related("banco_id", "conta", type="boolean", invisible=True, readonly=True),
"dv_conta_v": fields.related("banco_id", "dv_conta", type="boolean", invisible=True, readonly=True),
"operacao_v": fields.related("banco_id", "operacao", type="boolean", invisible=True, readonly=True),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(TransferirBolsaWizard, self).default_get(cr, uid, fields_list, context)
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"),
context)
if doc.state != "bolsista":
raise osv.except_osv(u"Discente bolsista", u"O discente já é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"O discente não pode ser classificado como bolsista")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id_de"] = doc.disciplina_id.curso_id.id
res["disciplina_id_de"] = doc.disciplina_id.id
res["tutor_de"] = doc.tutor
res["status_de"] = doc.state
res["doc_discente_id_de"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id_" + comp: False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id_" + comp: [("id", "in", disc)]}}
if not disc:
res["value"] = {"disciplina_id_" + comp: False}
return res
def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id, doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
return {
"value": {"doc_discente_id_" + comp: doc_discente_id}
}
return {"value": {"doc_discente_id_" + comp: False}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get("ud.banco").read(cr, uid, banco_id, [
"agencia", "dv_agencia", "conta", "dv_conta", "operacao"
], context=context, load="_classic_write")
vals = {"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}
vals.update({"%s_v" % dado: banco.get(dado) for dado in banco.keys()})
return {"value": vals}
return {"value": {"agencia_v": False, "dv_agencia_v": False, "conta_v": False, "dv_conta_v": False,"operacao_v": False,
"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}}
def botao_transferir(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
for transf in self.browse(cr, uid, ids, context):
matricula = transf.doc_discente_id_para.discente_id.matricula
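            # find the target student's academic profile (papel with tipo "a") matching this matricula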
for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == "a":
if perfil.is_bolsista:
raise osv.except_osv(
u"Discente bolsista",
u"O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"".format(
transf.doc_discente_id_para.discente_id.pessoa_id.name, matricula,
TIPOS_BOLSA[perfil.tipo_bolsa]
)
)
                    break
            else:
                perfil = None
            if not perfil:
                raise osv.except_osv(
                    u"Perfil excluído",
                    u"O perfil do discente para a matrícula \"%s\" não existe ou foi excluído" % matricula
                )
matricula = transf.doc_discente_id_de.discente_id.matricula
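            # repeat the lookup for the source student, whose bolsa will be moved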
for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:
                if perfil_de.matricula == matricula and perfil_de.tipo == "a":
break
responsavel = self.pool.get("ud.employee").search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
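            # move the bolsa flags and value from the source profile (perfil_de) to the target profile (perfil)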
valor = perfil_de.valor_bolsa
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {
"is_bolsista": True, "tipo_bolsa": "m", "valor_bolsa": valor
})
perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {
"is_bolsista": False, "tipo_bolsa": False, "valor_bolsa": False
})
transf.doc_discente_id_de.write({"state": "n_bolsista"})
transf.doc_discente_id_para.write({"state": "bolsista", "is_active": True})
get_banco(self, cr, transf, transf.doc_discente_id_para.discente_id.pessoa_id.id, context)
evento = {
"responsavel_id": responsavel[0],
"name": u"Transferência de bolsa",
"envolvidos_ids": [(4, transf.doc_discente_id_de.discente_id.pessoa_id.id),
(4, transf.doc_discente_id_para.discente_id.pessoa_id.id)],
"descricao": u"Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula "
u"%(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula"
u"\"%(matricula_para)s\"." % {
"valor": valor, "discente_de": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),
"matricula_de": perfil_de.matricula,
"discente_para": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),
"matricula_para": perfil_de.matricula
}
}
transf.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
class RemoverBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.remover.wizard"
_description = u"Remoção de bolsa de discente"
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('id', '=', False)]"),
"tutor": fields.boolean(u"Tutor?"),
"doc_discente_id": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), "
"('is_active', '=', True), ('state', '=', 'bolsista')]"),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(RemoverBolsaWizard, self).default_get(cr, uid, fields_list, context)
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"), context)
if doc.state != "bolsista":
raise osv.except_osv(u"Discente não bolsista", u"O discente não é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"Não é possível alterar o status de discentes inativos")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id"] = doc.disciplina_id.curso_id.id
res["disciplina_id"] = doc.disciplina_id.id
res["tutor"] = doc.tutor
res["doc_discente_id"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id": False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id": [("id", "in", disc)]}}
if not disc:
res["value"] = {"disciplina_id": False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
return {
"value": {"doc_discente_id": doc_discente_id}
}
return {"value": {"doc_discente_id": False}}
def botao_remover(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
pessoa_model = self.pool.get("ud.employee")
for rem in self.browse(cr, uid, ids, context):
responsavel = pessoa_model.search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
perfil = rem.doc_discente_id.inscricao_id.perfil_id
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {
"is_bolsista": False, "tipo_bolsa": False, "valor_bolsa": False
})
rem.doc_discente_id.write({"state": "n_bolsista"})
evento = {
"responsavel_id": responsavel[0],
"name": u"Remoção de bolsa: \"%s\"" % rem.doc_discente_id.discente_id.name,
"envolvidos_ids": [(4, rem.doc_discente_id.discente_id.id)],
"descricao": u"A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida." % (
rem.doc_discente_id.discente_id.name.upper(), perfil.matricula
)
}
rem.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
|
flexible
|
{
"blob_id": "fd877f5952c1fc0b2115d0950a066501ee7545f8",
"index": 4150,
"step-1": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n <mask token>\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n <mask token>\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n <mask token>\n <mask token>\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n 
).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, 
context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or 
'')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = 
doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-2": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n <mask token>\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n <mask token>\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise 
osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', 
True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n 
context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or '')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', 
True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 
'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-3": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.adicionar.wizard'\n _description = u'Inclusão de bolsa de monitoria para discente (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), ('is_active', '=', True)]\"\n ), 'bolsas': fields.function(_bolsas, type='integer', string=\n u'Bolsas disponíveis', help=\n u'Número de bolsas disponíveis para a disciplina'), 'valor_bolsa':\n fields.float(u'Bolsa (R$)'), 'tutor': fields.boolean(u'Tutor?'),\n 'status': fields.selection(_STATES, u'Status', required=True),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', status)]\"\n ), 'dados_bancarios_id': fields.many2one('ud.dados.bancarios',\n u'Dados Bancários', domain=[('id', '=', False)]), 'banco_id':\n fields.many2one('ud.banco', u'Banco', ondelete='restrict'),\n 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] 
= doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', 
uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 
'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = 
transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or '')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if 
context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-4": "<mask token>\nTIPOS_BOLSA = dict(_TIPOS_BOLSA)\n\n\ndef get_banco(cls, cr, browse_record, usuario_id, context=None):\n dados_bancarios_model = cls.pool.get('ud.dados.bancarios')\n args = [('banco_id', '=', browse_record.banco_id.id)]\n if browse_record.agencia_v:\n args.append(('agencia', '=', browse_record.agencia))\n if browse_record.dv_agencia_v:\n args.append(('dv_agencia', '=', browse_record.dv_agencia))\n if browse_record.conta_v:\n args.append(('conta', '=', browse_record.conta))\n if browse_record.dv_conta_v:\n args.append(('dv_conta', '=', browse_record.dv_conta))\n if browse_record.operacao_v:\n args.append(('operacao', '=', browse_record.operacao))\n dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args,\n context=context)\n if dados_bancarios:\n dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID,\n dados_bancarios[0])\n if not dados_bancarios.ud_conta_id:\n return dados_bancarios.id\n elif dados_bancarios.ud_conta_id.id == usuario_id:\n return dados_bancarios.id\n raise osv.except_osv(u'Dados Bancários duplicados',\n u'Outra pessoa já possui esses dados bancários!')\n dados = {'banco_id': browse_record.banco_id.id, 'agencia':\n browse_record.agencia, 'dv_agencia': browse_record.dv_agencia,\n 'conta': browse_record.conta, 'dv_conta': browse_record.dv_conta,\n 'operacao': browse_record.operacao, 'ud_conta_id': usuario_id}\n return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=\n context)\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.adicionar.wizard'\n _description = u'Inclusão de bolsa de monitoria para discente (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), ('is_active', '=', True)]\"\n ), 'bolsas': fields.function(_bolsas, type='integer', string=\n u'Bolsas disponíveis', help=\n u'Número de bolsas disponíveis para a disciplina'), 'valor_bolsa':\n fields.float(u'Bolsa (R$)'), 'tutor': fields.boolean(u'Tutor?'),\n 'status': fields.selection(_STATES, u'Status', required=True),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', status)]\"\n ), 'dados_bancarios_id': fields.many2one('ud.dados.bancarios',\n u'Dados Bancários', domain=[('id', '=', False)]), 'banco_id':\n fields.many2one('ud.banco', u'Banco', ondelete='restrict'),\n 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n 
related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = 
self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 
'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): 
[('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or '')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula 
%(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, 
context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-5": "# coding: utf-8\nfrom openerp import SUPERUSER_ID\nfrom openerp.osv import osv, fields\nfrom openerp.addons.ud.ud import _TIPOS_BOLSA\n\nTIPOS_BOLSA = dict(_TIPOS_BOLSA)\n\n\ndef get_banco(cls, cr, browse_record, usuario_id, context=None):\n dados_bancarios_model = cls.pool.get(\"ud.dados.bancarios\")\n args = [(\"banco_id\", \"=\", browse_record.banco_id.id)]\n if browse_record.agencia_v:\n args.append((\"agencia\", \"=\", browse_record.agencia))\n if browse_record.dv_agencia_v:\n args.append((\"dv_agencia\", \"=\", browse_record.dv_agencia))\n if browse_record.conta_v:\n args.append((\"conta\", \"=\", browse_record.conta))\n if browse_record.dv_conta_v:\n args.append((\"dv_conta\", \"=\", browse_record.dv_conta))\n if browse_record.operacao_v:\n args.append((\"operacao\", \"=\", browse_record.operacao))\n dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args, context=context)\n if dados_bancarios:\n dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID, dados_bancarios[0])\n if not dados_bancarios.ud_conta_id:\n return dados_bancarios.id\n elif dados_bancarios.ud_conta_id.id == usuario_id:\n return dados_bancarios.id\n raise osv.except_osv(u\"Dados Bancários duplicados\", u\"Outra pessoa já possui esses dados bancários!\")\n dados = {\"banco_id\": browse_record.banco_id.id, \"agencia\": browse_record.agencia, \"dv_agencia\": browse_record.dv_agencia,\n \"conta\": browse_record.conta, \"dv_conta\": browse_record.dv_conta, \"operacao\": browse_record.operacao,\n \"ud_conta_id\": usuario_id}\n return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=context)\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.adicionar.wizard\"\n _description = u\"Inclusão de bolsa de monitoria para discente (UD)\"\n\n _STATES = [\n (\"n_bolsista\", u\"Não Bolsista\"),\n (\"reserva\", u\"Cadastro de Reserva\"),\n ]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get(\"ud.monitoria.oferta.disciplina\")\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n \"curso_id\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), \"\n \"('is_active', '=', True)]\"),\n \"bolsas\": fields.function(_bolsas, type=\"integer\", string=u\"Bolsas disponíveis\",\n help=u\"Número de bolsas disponíveis para a disciplina\"),\n \"valor_bolsa\": fields.float(u\"Bolsa (R$)\"),\n \"tutor\": fields.boolean(u\"Tutor?\"),\n \"status\": fields.selection(_STATES, u\"Status\", required=True),\n \"doc_discente_id\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), \"\n \"('is_active', '=', True), ('state', '=', status)]\"),\n # DADOS BANCÁRIOS\n \"dados_bancarios_id\": fields.many2one(\"ud.dados.bancarios\", u\"Dados Bancários\", domain=[('id', '=', False)]),\n \"banco_id\": fields.many2one(\"ud.banco\", u\"Banco\", ondelete=\"restrict\"),\n \"agencia\": fields.char(u\"Agência\", size=4, help=u\"Número da Agência\"),\n \"dv_agencia\": fields.char(u\"DV Agência\", size=2, help=u\"Dígito verificador da 
Agência\"),\n \"conta\": fields.char(u\"Conta\", size=10, help=u\"Número da Conta\"),\n \"dv_conta\": fields.char(u\"DV Conta\", size=1, help=u\"Dígito verificador da Conta\"),\n \"operacao\": fields.char(u\"Operação\", size=3, help=u\"Tipo de conta\"),\n\n \"agencia_v\": fields.related(\"banco_id\", \"agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_agencia_v\": fields.related(\"banco_id\", \"dv_agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"conta_v\": fields.related(\"banco_id\", \"conta\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_conta_v\": fields.related(\"banco_id\", \"dv_conta\", type=\"boolean\", invisible=True, readonly=True),\n \"operacao_v\": fields.related(\"banco_id\", \"operacao\", type=\"boolean\", invisible=True, readonly=True),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid, fields_list, context)\n res[\"status\"] = \"n_bolsista\"\n res[\"valor_bolsa\"] = 400.\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"), context)\n if doc.state == \"bolsista\":\n raise osv.except_osv(u\"Discente bolsista\", u\"O discente já é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\", u\"Não é possível alterar o status de discentes inativos\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id\"] = doc.disciplina_id.id\n res[\"tutor\"] = doc.tutor\n res[\"status\"] = doc.state\n res[\"doc_discente_id\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id\": False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", \"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id\": [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"]= {\"disciplina_id\": False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n disciplina_id = self.pool.get(\"ud.monitoria.disciplina\").browse(cr, uid, disciplina_id, context)\n return {\n \"value\": {\"doc_discente_id\": doc_discente_id,\n \"bolsas\": disciplina_id.bolsas}\n }\n return {\"value\": {\"doc_discente_id\": False, \"bolsas\": 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id, dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = 
getattr(doc.dados_bancarios_id, \"id\", False)\n return {\"value\": {\"dados_bancarios_id\": dados_bancarios_id},\n \"domain\": {\"dados_bancarios_id\": [(\"ud_conta_id\", \"=\", doc.discente_id.id)]}}\n return {\"value\": {\"dados_bancarios_id\": False},\n \"domain\": {\"dados_bancarios_id\": [(\"id\", \"=\", False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get(\"ud.banco\").read(cr, uid, banco_id, [\n \"agencia\", \"dv_agencia\", \"conta\", \"dv_conta\", \"operacao\"\n ], context=context, load=\"_classic_write\")\n vals = {\"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}\n vals.update({\"%s_v\" % dado: banco.get(dado) for dado in banco.keys()})\n return {\"value\": vals}\n return {\"value\": {\"agencia_v\": False, \"dv_agencia_v\": False, \"conta_v\": False, \"dv_conta_v\": False,\"operacao_v\": False,\n \"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u\"Bolsas Insuficientes\", u\"Não há bolsas disponíveis para essa disciplina\")\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"O discente não pode ser classificado como bolsista\")\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(\n u\"Discente bolsista\",\n u\"O discente \\\"{}\\\" sob matrícula \\\"{}\\\" possui bolsa do tipo: \\\"{}\\\"\".format(\n add.doc_discente_id.discente_id.name, add.doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.tipo_bolsa]\n )\n )\n responsavel = self.pool.get(\"ud.employee\").search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.inscricao_id.perfil_id.id, {\n \"is_bolsista\": True, \"tipo_bolsa\": \"m\", \"valor_bolsa\": (\"%.2f\" % add.valor_bolsa).replace(\".\", \",\")\n })\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({\"state\": \"bolsista\", \"dados_bancarios_id\": dados_bancarios})\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Adição de bolsa: \\\"%s\\\"\" % add.doc_discente_id.discente_id.name,\n \"envolvidos_ids\": [(4, add.doc_discente_id.discente_id.id)],\n \"descricao\": u\"Uma bolsa de R$ %s foi vinculada para o(a) discente \\\"%s\\\" sob matrícula \\\"%s\\\".\" % (\n (\"%.2f\" % add.valor_bolsa).replace(\".\", \",\"),\n add.doc_discente_id.discente_id.name.upper(), add.doc_discente_id.inscricao_id.perfil_id.matricula\n )\n }\n add.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.transferir.wizard\"\n _description = u\"Transferência de bolsa de monitoria (UD)\"\n\n _STATES 
= [\n (\"n_bolsista\", u\"Não Bolsista\"),\n (\"reserva\", u\"Cadastro de Reserva\"),\n ]\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n\n \"curso_id_de\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id_de\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('id', '=', False)]\"),\n \"tutor_de\": fields.boolean(u\"Tutor?\"),\n \"doc_discente_id_de\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('is_active', '=', True), ('state', '=', 'bolsista'), \"\n \"('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"),\n\n \"curso_id_para\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id_para\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), \"\n \"('is_active', '=', True)]\"),\n \"tutor_para\": fields.boolean(u\"Tutor?\"),\n \"status_para\": fields.selection(_STATES, u\"Status\", required=True),\n \"doc_discente_id_para\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('is_active', '=', True), ('state', '=', status_para), \"\n \"('disciplina_id', '=', disciplina_id_para), \"\n \"('tutor', '=', tutor_para)]\"),\n # DADOS BANCÁRIOS\n \"banco_id\": fields.many2one(\"ud.banco\", u\"Banco\", ondelete=\"restrict\"),\n \"agencia\": fields.char(u\"Agência\", size=4, help=u\"Número da Agência\"),\n \"dv_agencia\": fields.char(u\"DV Agência\", size=2, help=u\"Dígito verificador da Agência\"),\n \"conta\": fields.char(u\"Conta\", size=10, help=u\"Número da Conta\"),\n \"dv_conta\": fields.char(u\"DV Conta\", size=1, help=u\"Dígito verificador da Conta\"),\n \"operacao\": fields.char(u\"Operação\", size=3, help=u\"Tipo de conta\"),\n\n \"agencia_v\": fields.related(\"banco_id\", \"agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_agencia_v\": fields.related(\"banco_id\", \"dv_agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"conta_v\": fields.related(\"banco_id\", \"conta\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_conta_v\": fields.related(\"banco_id\", \"dv_conta\", type=\"boolean\", invisible=True, readonly=True),\n \"operacao_v\": fields.related(\"banco_id\", \"operacao\", type=\"boolean\", invisible=True, readonly=True),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid, fields_list, context)\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"),\n context)\n if doc.state != \"bolsista\":\n raise osv.except_osv(u\"Discente bolsista\", u\"O discente já é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"O discente não pode ser classificado como bolsista\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id_de\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id_de\"] = doc.disciplina_id.id\n 
res[\"tutor_de\"] = doc.tutor\n res[\"status_de\"] = doc.state\n res[\"doc_discente_id_de\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id_\" + comp: False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", \"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id_\" + comp: [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"] = {\"disciplina_id_\" + comp: False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id, doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n return {\n \"value\": {\"doc_discente_id_\" + comp: doc_discente_id}\n }\n return {\"value\": {\"doc_discente_id_\" + comp: False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get(\"ud.banco\").read(cr, uid, banco_id, [\n \"agencia\", \"dv_agencia\", \"conta\", \"dv_conta\", \"operacao\"\n ], context=context, load=\"_classic_write\")\n vals = {\"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}\n vals.update({\"%s_v\" % dado: banco.get(dado) for dado in banco.keys()})\n return {\"value\": vals}\n return {\"value\": {\"agencia_v\": False, \"dv_agencia_v\": False, \"conta_v\": False, \"dv_conta_v\": False,\"operacao_v\": False,\n \"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == \"a\":\n if perfil.is_bolsista:\n raise osv.except_osv(\n u\"Discente bolsista\",\n u\"O discente \\\"{}\\\" sob matrícula \\\"{}\\\" possui bolsa do tipo: \\\"{}\\\"\".format(\n transf.doc_discente_id_para.discente_id.pessoa_id.name, matricula,\n TIPOS_BOLSA[perfil.tipo_bolsa]\n )\n )\n break\n if not perfil:\n raise osv.except_osv(\n u\"Perfil excluído\",\n u\"O perfil do discente para a matrícula \\\"%s\\\" não existe ou foi excluído\" % matricula or \"\"\n )\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == \"a\":\n break\n responsavel = self.pool.get(\"ud.employee\").search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n 
valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {\n \"is_bolsista\": True, \"tipo_bolsa\": \"m\", \"valor_bolsa\": valor\n })\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n \"is_bolsista\": False, \"tipo_bolsa\": False, \"valor_bolsa\": False\n })\n transf.doc_discente_id_de.write({\"state\": \"n_bolsista\"})\n transf.doc_discente_id_para.write({\"state\": \"bolsista\", \"is_active\": True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.discente_id.pessoa_id.id, context)\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Transferência de bolsa\",\n \"envolvidos_ids\": [(4, transf.doc_discente_id_de.discente_id.pessoa_id.id),\n (4, transf.doc_discente_id_para.discente_id.pessoa_id.id)],\n \"descricao\": u\"Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula \"\n u\"%(matricula_de)s para o(a) discente \\\"%(discente_para)s\\\" sob matrícula\"\n u\"\\\"%(matricula_para)s\\\".\" % {\n \"valor\": valor, \"discente_de\": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n \"matricula_de\": perfil_de.matricula,\n \"discente_para\": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n \"matricula_para\": perfil_de.matricula\n }\n }\n transf.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.remover.wizard\"\n _description = u\"Remoção de bolsa de discente\"\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n \"curso_id\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('id', '=', False)]\"),\n \"tutor\": fields.boolean(u\"Tutor?\"),\n \"doc_discente_id\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), \"\n \"('is_active', '=', True), ('state', '=', 'bolsista')]\"),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid, fields_list, context)\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"), context)\n if doc.state != \"bolsista\":\n raise osv.except_osv(u\"Discente não bolsista\", u\"O discente não é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"Não é possível alterar o status de discentes inativos\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id\"] = doc.disciplina_id.id\n res[\"tutor\"] = doc.tutor\n res[\"doc_discente_id\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id\": False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", 
\"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id\": [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"] = {\"disciplina_id\": False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n return {\n \"value\": {\"doc_discente_id\": doc_discente_id}\n }\n return {\"value\": {\"doc_discente_id\": False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n pessoa_model = self.pool.get(\"ud.employee\")\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {\n \"is_bolsista\": False, \"tipo_bolsa\": False, \"valor_bolsa\": False\n })\n rem.doc_discente_id.write({\"state\": \"n_bolsista\"})\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Remoção de bolsa: \\\"%s\\\"\" % rem.doc_discente_id.discente_id.name,\n \"envolvidos_ids\": [(4, rem.doc_discente_id.discente_id.id)],\n \"descricao\": u\"A bolsa do discente \\\"%s\\\" sob matrícula \\\"%s\\\" foi removida.\" % (\n rem.doc_discente_id.discente_id.name.upper(), perfil.matricula\n )\n }\n rem.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n",
"step-ids": [
18,
20,
22,
24,
26
]
}
|
[
18,
20,
22,
24,
26
] |
<|reserved_special_token_0|>
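# Evaluate a trained Doc2Vec model: fit a LogisticRegression on the 25000
# stored training document vectors (tagged good_*/bad_*), then score the
# classifier on vectors inferred for held-out good and bad reviews.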
def compute_accuracy(model, good, bad):
train_arrays = numpy.zeros((25000, 400))
train_labels = numpy.zeros(25000)
classifier = LogisticRegression()
    for i in range(25000 // 2):
prefix_train_pos = 'good_' + str(i)
prefix_train_neg = 'bad_' + str(i)
pos_review = model.docvecs[prefix_train_pos]
neg_review = model.docvecs[prefix_train_neg]
train_arrays[i] = pos_review
train_labels[i] = 1
        train_arrays[25000 // 2 + i] = neg_review
        train_labels[25000 // 2 + i] = 0
classifier.fit(train_arrays, train_labels)
test_arrays_good = numpy.zeros((12500, 400))
test_ratings_good = numpy.zeros(12500)
test_labels_good = numpy.zeros(12500)
test_arrays_bad = numpy.zeros((12500, 400))
test_ratings_bad = numpy.zeros(12500)
test_labels_bad = numpy.zeros(12500)
test_arrays = numpy.zeros((25000, 400))
test_rating = numpy.zeros(25000)
test_labels = numpy.zeros(25000)
good_correct = 0
good_total = 0
bad_correct = 0
bad_total = 0
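    # each review yielded by the sources is LabeledSentence-like: review[0] is
    # the word list, which infer_vector turns into a 400-dim embedding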
for i, review in enumerate(good):
test_arrays[i] = model.infer_vector(review[0])
test_labels[i] = 1
if classifier.predict([test_arrays[i]]) == 1:
good_correct += 1
for i, review in enumerate(bad):
test_arrays[i + 12500] = model.infer_vector(review[0])
test_labels[i + 12500] = 0
if classifier.predict([test_arrays[i + 12500]]) == 0:
bad_correct += 1
accuracy = classifier.score(test_arrays, test_labels) * 100
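    # score() returns mean accuracy in [0, 1], hence the * 100 for a percentage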
print('Classifier reports a {}% accuracy'.format(accuracy))
print('{} Good correctly identified'.format(good_correct))
print('{} Bad correctly identified'.format(bad_correct))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def compute_accuracy(model, good, bad):
train_arrays = numpy.zeros((25000, 400))
train_labels = numpy.zeros(25000)
classifier = LogisticRegression()
    for i in range(25000 // 2):
prefix_train_pos = 'good_' + str(i)
prefix_train_neg = 'bad_' + str(i)
pos_review = model.docvecs[prefix_train_pos]
neg_review = model.docvecs[prefix_train_neg]
train_arrays[i] = pos_review
train_labels[i] = 1
        train_arrays[25000 // 2 + i] = neg_review
        train_labels[25000 // 2 + i] = 0
classifier.fit(train_arrays, train_labels)
test_arrays_good = numpy.zeros((12500, 400))
test_ratings_good = numpy.zeros(12500)
test_labels_good = numpy.zeros(12500)
test_arrays_bad = numpy.zeros((12500, 400))
test_ratings_bad = numpy.zeros(12500)
test_labels_bad = numpy.zeros(12500)
test_arrays = numpy.zeros((25000, 400))
test_rating = numpy.zeros(25000)
test_labels = numpy.zeros(25000)
good_correct = 0
good_total = 0
bad_correct = 0
bad_total = 0
for i, review in enumerate(good):
test_arrays[i] = model.infer_vector(review[0])
test_labels[i] = 1
if classifier.predict([test_arrays[i]]) == 1:
good_correct += 1
for i, review in enumerate(bad):
test_arrays[i + 12500] = model.infer_vector(review[0])
test_labels[i + 12500] = 0
if classifier.predict([test_arrays[i + 12500]]) == 0:
bad_correct += 1
accuracy = classifier.score(test_arrays, test_labels) * 100
print('Classifier reports a {}% accuracy'.format(accuracy))
print('{} Good correctly identified'.format(good_correct))
print('{} Bad correctly identified'.format(bad_correct))
<|reserved_special_token_0|>
compute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dirname = os.path.dirname(__file__)
def compute_accuracy(model, good, bad):
train_arrays = numpy.zeros((25000, 400))
train_labels = numpy.zeros(25000)
classifier = LogisticRegression()
    for i in range(25000 // 2):
prefix_train_pos = 'good_' + str(i)
prefix_train_neg = 'bad_' + str(i)
pos_review = model.docvecs[prefix_train_pos]
neg_review = model.docvecs[prefix_train_neg]
train_arrays[i] = pos_review
train_labels[i] = 1
        train_arrays[25000 // 2 + i] = neg_review
        train_labels[25000 // 2 + i] = 0
classifier.fit(train_arrays, train_labels)
test_arrays_good = numpy.zeros((12500, 400))
test_ratings_good = numpy.zeros(12500)
test_labels_good = numpy.zeros(12500)
test_arrays_bad = numpy.zeros((12500, 400))
test_ratings_bad = numpy.zeros(12500)
test_labels_bad = numpy.zeros(12500)
test_arrays = numpy.zeros((25000, 400))
test_rating = numpy.zeros(25000)
test_labels = numpy.zeros(25000)
good_correct = 0
good_total = 0
bad_correct = 0
bad_total = 0
for i, review in enumerate(good):
test_arrays[i] = model.infer_vector(review[0])
test_labels[i] = 1
if classifier.predict([test_arrays[i]]) == 1:
good_correct += 1
for i, review in enumerate(bad):
test_arrays[i + 12500] = model.infer_vector(review[0])
test_labels[i + 12500] = 0
if classifier.predict([test_arrays[i + 12500]]) == 0:
bad_correct += 1
accuracy = classifier.score(test_arrays, test_labels) * 100
print('Classifier reports a {}% accuracy'.format(accuracy))
print('{} Good correctly identified'.format(good_correct))
print('{} Bad correctly identified'.format(bad_correct))
yelp_model = Doc2Vec.load(os.path.join(dirname, 'models/yelp_model.d2v'))
yelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname,
'../data/review.json'), 'good', 12500)
yelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname,
'../data/review.json'), 'bad', 12500)
compute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)
<|reserved_special_token_1|>
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import CountVectorizer
from random import shuffle
from sklearn.linear_model import LogisticRegression
from yelp_labeled_line_sentence import YelpLabeledLineSentence
from imdb_labeled_line_sentence import IMDBLabeledLineSentence
from sklearn.linear_model import SGDClassifier
import numpy
import json
import time
import os
import sys
import csv
dirname = os.path.dirname(__file__)
def compute_accuracy(model, good, bad):
train_arrays = numpy.zeros((25000, 400))
train_labels = numpy.zeros(25000)
classifier = LogisticRegression()
    for i in range(25000 // 2):
prefix_train_pos = 'good_' + str(i)
prefix_train_neg = 'bad_' + str(i)
pos_review = model.docvecs[prefix_train_pos]
neg_review = model.docvecs[prefix_train_neg]
train_arrays[i] = pos_review
train_labels[i] = 1
        train_arrays[25000 // 2 + i] = neg_review
        train_labels[25000 // 2 + i] = 0
classifier.fit(train_arrays, train_labels)
test_arrays_good = numpy.zeros((12500, 400))
test_ratings_good = numpy.zeros(12500)
test_labels_good = numpy.zeros(12500)
test_arrays_bad = numpy.zeros((12500, 400))
test_ratings_bad = numpy.zeros(12500)
test_labels_bad = numpy.zeros(12500)
test_arrays = numpy.zeros((25000, 400))
test_rating = numpy.zeros(25000)
test_labels = numpy.zeros(25000)
good_correct = 0
good_total = 0
bad_correct = 0
bad_total = 0
for i, review in enumerate(good):
test_arrays[i] = model.infer_vector(review[0])
test_labels[i] = 1
if classifier.predict([test_arrays[i]]) == 1:
good_correct += 1
for i, review in enumerate(bad):
test_arrays[i + 12500] = model.infer_vector(review[0])
test_labels[i + 12500] = 0
if classifier.predict([test_arrays[i + 12500]]) == 0:
bad_correct += 1
accuracy = classifier.score(test_arrays, test_labels) * 100
print('Classifier reports a {}% accuracy'.format(accuracy))
print('{} Good correctly identified'.format(good_correct))
print('{} Bad correctly identified'.format(bad_correct))
yelp_model = Doc2Vec.load(os.path.join(dirname, 'models/yelp_model.d2v'))
yelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname,
'../data/review.json'), 'good', 12500)
yelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname,
'../data/review.json'), 'bad', 12500)
compute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)
<|reserved_special_token_1|>
# ARGS:
# 1: total train reviews
# 2: number of iterations (for csv output)
# 3: size of vector
# 4: good/bad sizes
# import dependencies
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import CountVectorizer
from random import shuffle
from sklearn.linear_model import LogisticRegression
from yelp_labeled_line_sentence import YelpLabeledLineSentence
from imdb_labeled_line_sentence import IMDBLabeledLineSentence
from sklearn.linear_model import SGDClassifier
import numpy
import json
import time
import os
import sys
import csv
dirname = os.path.dirname(__file__)
def compute_accuracy(model, good, bad):
# load our doc2vec model that we trained
# take our train reviews from the model, and put them in array, good reviews first, bad reviews second half of array
train_arrays = numpy.zeros((25000, 400))
train_labels = numpy.zeros(25000)
# create a logistic regression classifier
classifier = LogisticRegression()
# take our train reviews from the model, and put them in array, good reviews first, bad reviews second half of array
    for i in range(25000 // 2):
prefix_train_pos = 'good_' + str(i)
prefix_train_neg = 'bad_' + str(i)
pos_review = model.docvecs[prefix_train_pos]
neg_review = model.docvecs[prefix_train_neg]
train_arrays[i] = pos_review
train_labels[i] = 1
        train_arrays[25000 // 2 + i] = neg_review
        train_labels[25000 // 2 + i] = 0
classifier.fit(train_arrays, train_labels)
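    # SGDClassifier is imported at the top but never used; it could stand in
    # here with e.g. classifier = SGDClassifier(loss='log') if preferred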
# take our test reviews from the model, and put them in array, good reviews first, bad reviews second half of array
# for each review, we'll infer the review's vector against our model
test_arrays_good = numpy.zeros((12500, 400))
test_ratings_good = numpy.zeros(12500)
test_labels_good = numpy.zeros(12500)
test_arrays_bad = numpy.zeros((12500, 400))
test_ratings_bad = numpy.zeros(12500)
test_labels_bad = numpy.zeros(12500)
test_arrays = numpy.zeros((25000, 400))
test_rating = numpy.zeros(25000)
test_labels = numpy.zeros(25000)
good_correct = 0
good_total = 0
bad_correct = 0
bad_total = 0
for i, review in enumerate(good):
test_arrays[i] = model.infer_vector(review[0])
test_labels[i] = 1
        if classifier.predict([test_arrays[i]]) == 1:
good_correct += 1
# test_ratings_good[i] = review[1][2]
for i, review in enumerate(bad):
test_arrays[i + 12500] = model.infer_vector(review[0])
test_labels[i + 12500] = 0
        if classifier.predict([test_arrays[i + 12500]]) == 0:
bad_correct += 1
# test_ratings_bad[i] = review[1][2]
# print the accuracy of our classifier
# accuracy=classifier.score(test_arrays_good, test_labels_good) * 100
# print("Classifier reports a {}% accuracy for good reviews".format(accuracy))
#
# accuracy=classifier.score(test_arrays_bad, test_labels_bad) * 100
# print("Classifier reports a {}% accuracy for bad reviews".format(accuracy))
#
    accuracy = classifier.score(test_arrays, test_labels) * 100
print("Classifier reports a {}% accuracy".format(accuracy))
print("{} Good correctly identified".format(good_correct))
print("{} Bad correctly identified".format(bad_correct))
# for dim in range(1, int(sys.argv[3])):
# # plot probability of review being good vs feature vector value
# plt.scatter(test_arrays_good[:,dim], classifier.predict_proba(test_arrays_good)[:,1], color='green')
# plt.scatter(test_arrays_bad[:,dim], classifier.predict_proba(test_arrays_bad)[:,1], color='red')
#
# plt.ylabel('Probability of Review Being Good')
# plt.xlabel('dim={}'.format(dim))
# plt.show()
# # reduce the n-dimensional feature vector to n=1 using t-SNE
# tsne = TSNE(n_components=1)
# test_arrays_tsne_good = tsne.fit_transform(test_arrays_good)
# test_arrays_tsne_bad = tsne.fit_transform(test_arrays_bad)
#
# # plot probability of review being good vs feature vector value
# plt.scatter(test_arrays_tsne_good, classifier.predict_proba(test_arrays_good)[:,1], color='green')
# plt.scatter(test_arrays_tsne_bad, classifier.predict_proba(test_arrays_bad)[:,1], color='red')
#
# plt.ylabel('Probability of Review Being Good')
# plt.xlabel('t-SNE reduced feature vector (dim=1)')
# plt.show()
# # reduce the n-dimensional feature vector to n=1 using t-SNE
# tsne = TSNE(n_components=2)
# test_arrays_tsne_good = tsne.fit_transform(test_arrays_good)
# test_arrays_tsne_bad = tsne.fit_transform(test_arrays_bad)
#
# # plot feature vectors against each other
# plt.scatter(test_arrays_tsne_good[:,0], test_arrays_tsne_good[:,1], color='green')
# plt.scatter(test_arrays_tsne_bad[:,0], test_arrays_tsne_bad[:,1], color='red')
#
# plt.ylabel('x1')
# plt.xlabel('x2')
# plt.show()
yelp_model = Doc2Vec.load(os.path.join(dirname,'models/yelp_model.d2v'))
# imdb_model = Doc2Vec.load(os.path.join(dirname,'models/imdb_model.d2v'))
# create an array of LabeledLineSentences for previously unseen
# good and bad reviews
# this does some basic formatting of the text as well to make it more
# digestible by gensim and sklearn
yelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname, '../data/review.json'), 'good', 12500)
yelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname, '../data/review.json'), 'bad', 12500)
# imdb_sources_good = IMDBLabeledLineSentence({os.path.join(dirname, '../data/aclImdb/test/pos'):'good'})
# imdb_sources_bad = IMDBLabeledLineSentence({os.path.join(dirname, '../data/aclImdb/test/neg'):'bad'})
compute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)
# compute_accuracy(imdb_model, imdb_sources_good, imdb_sources_bad)
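# the IMDB branch is left disabled; enabling it assumes models/imdb_model.d2v
# was trained beforehand and that ../data/aclImdb/test/{pos,neg} exist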
|
flexible
|
{
"blob_id": "95015c467dd6371f575fb5535fe652a914650ef1",
"index": 2016,
"step-1": "<mask token>\n\n\ndef compute_accuracy(model, good, bad):\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n classifier = LogisticRegression()\n for i in range(25000 / 2):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n train_arrays[i] = pos_review\n train_labels[i] = 1\n train_arrays[25000 / 2 + i] = neg_review\n train_labels[25000 / 2 + i] = 0\n classifier.fit(train_arrays, train_labels)\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if classifier.predict([test_arrays[i]]) == 1:\n good_correct += 1\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if classifier.predict([test_arrays[i + 12500]]) == 0:\n bad_correct += 1\n accuracy = classifier.score(test_arrays, test_labels) * 100\n print('Classifier reports a {}% accuracy'.format(accuracy))\n print('{} Good correctly identified'.format(good_correct))\n print('{} Bad correctly identified'.format(bad_correct))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef compute_accuracy(model, good, bad):\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n classifier = LogisticRegression()\n for i in range(25000 / 2):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n train_arrays[i] = pos_review\n train_labels[i] = 1\n train_arrays[25000 / 2 + i] = neg_review\n train_labels[25000 / 2 + i] = 0\n classifier.fit(train_arrays, train_labels)\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if classifier.predict([test_arrays[i]]) == 1:\n good_correct += 1\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if classifier.predict([test_arrays[i + 12500]]) == 0:\n bad_correct += 1\n accuracy = classifier.score(test_arrays, test_labels) * 100\n print('Classifier reports a {}% accuracy'.format(accuracy))\n print('{} Good correctly identified'.format(good_correct))\n print('{} Bad correctly identified'.format(bad_correct))\n\n\n<mask token>\ncompute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)\n",
"step-3": "<mask token>\ndirname = os.path.dirname(__file__)\n\n\ndef compute_accuracy(model, good, bad):\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n classifier = LogisticRegression()\n for i in range(25000 / 2):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n train_arrays[i] = pos_review\n train_labels[i] = 1\n train_arrays[25000 / 2 + i] = neg_review\n train_labels[25000 / 2 + i] = 0\n classifier.fit(train_arrays, train_labels)\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if classifier.predict([test_arrays[i]]) == 1:\n good_correct += 1\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if classifier.predict([test_arrays[i + 12500]]) == 0:\n bad_correct += 1\n accuracy = classifier.score(test_arrays, test_labels) * 100\n print('Classifier reports a {}% accuracy'.format(accuracy))\n print('{} Good correctly identified'.format(good_correct))\n print('{} Bad correctly identified'.format(bad_correct))\n\n\nyelp_model = Doc2Vec.load(os.path.join(dirname, 'models/yelp_model.d2v'))\nyelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname,\n '../data/review.json'), 'good', 12500)\nyelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname,\n '../data/review.json'), 'bad', 12500)\ncompute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)\n",
"step-4": "from gensim import utils\nfrom gensim.models.doc2vec import LabeledSentence\nfrom gensim.models import Doc2Vec\nfrom matplotlib import pyplot as plt\nfrom sklearn.manifold import TSNE\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom random import shuffle\nfrom sklearn.linear_model import LogisticRegression\nfrom yelp_labeled_line_sentence import YelpLabeledLineSentence\nfrom imdb_labeled_line_sentence import IMDBLabeledLineSentence\nfrom sklearn.linear_model import SGDClassifier\nimport numpy\nimport json\nimport time\nimport os\nimport sys\nimport csv\ndirname = os.path.dirname(__file__)\n\n\ndef compute_accuracy(model, good, bad):\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n classifier = LogisticRegression()\n for i in range(25000 / 2):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n train_arrays[i] = pos_review\n train_labels[i] = 1\n train_arrays[25000 / 2 + i] = neg_review\n train_labels[25000 / 2 + i] = 0\n classifier.fit(train_arrays, train_labels)\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if classifier.predict([test_arrays[i]]) == 1:\n good_correct += 1\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if classifier.predict([test_arrays[i + 12500]]) == 0:\n bad_correct += 1\n accuracy = classifier.score(test_arrays, test_labels) * 100\n print('Classifier reports a {}% accuracy'.format(accuracy))\n print('{} Good correctly identified'.format(good_correct))\n print('{} Bad correctly identified'.format(bad_correct))\n\n\nyelp_model = Doc2Vec.load(os.path.join(dirname, 'models/yelp_model.d2v'))\nyelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname,\n '../data/review.json'), 'good', 12500)\nyelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname,\n '../data/review.json'), 'bad', 12500)\ncompute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)\n",
"step-5": "# ARGS:\n# 1: total train reviews\n# 2: number of iterations (for csv output)\n# 3: size of vector\n# 4: good/bad sizes\n\n# import dependencies\nfrom gensim import utils\nfrom gensim.models.doc2vec import LabeledSentence\nfrom gensim.models import Doc2Vec\nfrom matplotlib import pyplot as plt\nfrom sklearn.manifold import TSNE\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom random import shuffle\nfrom sklearn.linear_model import LogisticRegression\nfrom yelp_labeled_line_sentence import YelpLabeledLineSentence\nfrom imdb_labeled_line_sentence import IMDBLabeledLineSentence\nfrom sklearn.linear_model import SGDClassifier\nimport numpy\nimport json\nimport time\nimport os\nimport sys\nimport csv\n\ndirname = os.path.dirname(__file__)\n\ndef compute_accuracy(model, good, bad):\n # load our doc2vec model that we trained\n\n\n # take our train reviews from the model, and put them in array, good reviews first, bad reviews second half of array\n train_arrays = numpy.zeros((25000, 400))\n train_labels = numpy.zeros(25000)\n\n # create a logistic regression classifier\n classifier = LogisticRegression()\n\n # take our train reviews from the model, and put them in array, good reviews first, bad reviews second half of array\n for i in range((25000/2)):\n prefix_train_pos = 'good_' + str(i)\n prefix_train_neg = 'bad_' + str(i)\n\n pos_review = model.docvecs[prefix_train_pos]\n neg_review = model.docvecs[prefix_train_neg]\n\n train_arrays[i] = pos_review\n train_labels[i] = 1\n\n train_arrays[(25000/2) + i] = neg_review\n train_labels[(25000/2) + i] = 0\n\n classifier.fit(train_arrays, train_labels)\n\n\n # take our test reviews from the model, and put them in array, good reviews first, bad reviews second half of array\n # for each review, we'll infer the review's vector against our model\n\n test_arrays_good = numpy.zeros((12500, 400))\n test_ratings_good = numpy.zeros(12500)\n test_labels_good = numpy.zeros(12500)\n\n test_arrays_bad = numpy.zeros((12500, 400))\n test_ratings_bad = numpy.zeros(12500)\n test_labels_bad = numpy.zeros(12500)\n\n test_arrays = numpy.zeros((25000, 400))\n test_rating = numpy.zeros(25000)\n test_labels = numpy.zeros(25000)\n\n good_correct = 0\n good_total = 0\n bad_correct = 0\n bad_total = 0\n\n for i, review in enumerate(good):\n test_arrays[i] = model.infer_vector(review[0])\n test_labels[i] = 1\n if(classifier.predict([test_arrays[i]]) == 1):\n good_correct += 1\n # test_ratings_good[i] = review[1][2]\n\n for i, review in enumerate(bad):\n test_arrays[i + 12500] = model.infer_vector(review[0])\n test_labels[i + 12500] = 0\n if(classifier.predict([test_arrays[i + 12500]]) == 0):\n bad_correct += 1\n\n # test_ratings_bad[i] = review[1][2]\n\n # print the accuracy of our classifier\n # accuracy=classifier.score(test_arrays_good, test_labels_good) * 100\n # print(\"Classifier reports a {}% accuracy for good reviews\".format(accuracy))\n #\n # accuracy=classifier.score(test_arrays_bad, test_labels_bad) * 100\n # print(\"Classifier reports a {}% accuracy for bad reviews\".format(accuracy))\n #\n accuracy=classifier.score(test_arrays, test_labels) * 100\n print(\"Classifier reports a {}% accuracy\".format(accuracy))\n\n\n print(\"{} Good correctly identified\".format(good_correct))\n print(\"{} Bad correctly identified\".format(bad_correct))\n\n # for dim in range(1, int(sys.argv[3])):\n # # plot probability of review being good vs feature vector value\n # plt.scatter(test_arrays_good[:,dim], classifier.predict_proba(test_arrays_good)[:,1], 
color='green')\n # plt.scatter(test_arrays_bad[:,dim], classifier.predict_proba(test_arrays_bad)[:,1], color='red')\n #\n # plt.ylabel('Probability of Review Being Good')\n # plt.xlabel('dim={}'.format(dim))\n # plt.show()\n\n # # reduce the n-dimensional feature vector to n=1 using t-SNE\n # tsne = TSNE(n_components=1)\n # test_arrays_tsne_good = tsne.fit_transform(test_arrays_good)\n # test_arrays_tsne_bad = tsne.fit_transform(test_arrays_bad)\n #\n # # plot probability of review being good vs feature vector value\n # plt.scatter(test_arrays_tsne_good, classifier.predict_proba(test_arrays_good)[:,1], color='green')\n # plt.scatter(test_arrays_tsne_bad, classifier.predict_proba(test_arrays_bad)[:,1], color='red')\n #\n # plt.ylabel('Probability of Review Being Good')\n # plt.xlabel('t-SNE reduced feature vector (dim=1)')\n # plt.show()\n\n # # reduce the n-dimensional feature vector to n=1 using t-SNE\n # tsne = TSNE(n_components=2)\n # test_arrays_tsne_good = tsne.fit_transform(test_arrays_good)\n # test_arrays_tsne_bad = tsne.fit_transform(test_arrays_bad)\n #\n # # plot feature vectors against each other\n # plt.scatter(test_arrays_tsne_good[:,0], test_arrays_tsne_good[:,1], color='green')\n # plt.scatter(test_arrays_tsne_bad[:,0], test_arrays_tsne_bad[:,1], color='red')\n #\n # plt.ylabel('x1')\n # plt.xlabel('x2')\n # plt.show()\n\n\nyelp_model = Doc2Vec.load(os.path.join(dirname,'models/yelp_model.d2v'))\n# imdb_model = Doc2Vec.load(os.path.join(dirname,'models/imdb_model.d2v'))\n\n# create an array of LabeledLineSentences for previously unseen\n# good and bad reviews\n# this does some basic formatting of the text as well to make it more\n# digestible by gensim and sklearn\nyelp_sources_good = YelpLabeledLineSentence(os.path.join(dirname, '../data/review.json'), 'good', 12500)\nyelp_sources_bad = YelpLabeledLineSentence(os.path.join(dirname, '../data/review.json'), 'bad', 12500)\n\n# imdb_sources_good = IMDBLabeledLineSentence({os.path.join(dirname, '../data/aclImdb/test/pos'):'good'})\n# imdb_sources_bad = IMDBLabeledLineSentence({os.path.join(dirname, '../data/aclImdb/test/neg'):'bad'})\n\ncompute_accuracy(yelp_model, yelp_sources_good, yelp_sources_bad)\n# compute_accuracy(imdb_model, imdb_sources_good, imdb_sources_bad)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(Londonlocaltime)
print(Londonlocaltime.strftime('%H'))
<|reserved_special_token_0|>
print(PDXlocaltime)
print(PDXlocaltime.strftime('%H'))
<|reserved_special_token_0|>
print(NYClocaltime)
print(NYClocaltime.strftime('%H'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Londontz = timezone('Europe/London')
Londonlocaltime = datetime.now(Londontz)
print(Londonlocaltime)
print(Londonlocaltime.strftime('%H'))
PDXtz = timezone('America/Los_Angeles')
PDXlocaltime = datetime.now(PDXtz)
print(PDXlocaltime)
print(PDXlocaltime.strftime('%H'))
NYCtz = timezone('America/New_York')
NYClocaltime = datetime.now(NYCtz)
print(NYClocaltime)
print(NYClocaltime.strftime('%H'))
<|reserved_special_token_1|>
from datetime import datetime
import pytz
from pytz import timezone
Londontz = timezone('Europe/London')
Londonlocaltime = datetime.now(Londontz)
print(Londonlocaltime)
print(Londonlocaltime.strftime('%H'))
PDXtz = timezone('America/Los_Angeles')
PDXlocaltime = datetime.now(PDXtz)
print(PDXlocaltime)
print(PDXlocaltime.strftime('%H'))
NYCtz = timezone('America/New_York')
NYClocaltime = datetime.now(NYCtz)
print(NYClocaltime)
print(NYClocaltime.strftime('%H'))
<|reserved_special_token_1|>
from datetime import datetime
import pytz
from pytz import timezone
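# pytz resolves zone names from the IANA tz database, e.g. 'Europe/London'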
##PDXtime = datetime.now()
##print(PDXtime.hour)
##
##NYCtime = PDXtime.hour + 3
##print(NYCtime)
##
##Londontime = PDXtime.hour + 8
##print(Londontime)
Londontz = timezone('Europe/London')
Londonlocaltime = datetime.now(Londontz)
print(Londonlocaltime)
print(Londonlocaltime.strftime('%H')) #just the hour in 24 hr format
PDXtz = timezone('America/Los_Angeles')
PDXlocaltime = datetime.now(PDXtz)
print(PDXlocaltime)
print(PDXlocaltime.strftime('%H'))
NYCtz = timezone('America/New_York')
NYClocaltime = datetime.now(NYCtz)
print(NYClocaltime)
print(NYClocaltime.strftime('%H'))
|
flexible
|
{
"blob_id": "d8cfd9de95e1f47fc41a5389f5137b4af90dc0f1",
"index": 3949,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\n<mask token>\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\n<mask token>\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-3": "<mask token>\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-4": "from datetime import datetime\nimport pytz\nfrom pytz import timezone\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-5": "from datetime import datetime\nimport pytz\nfrom pytz import timezone \n\n\n\n##PDXtime = datetime.now()\n##print(PDXtime.hour)\n##\n##NYCtime = PDXtime.hour + 3\n##print(NYCtime)\n##\n##Londontime = PDXtime.hour + 8\n##print(Londontime)\n\n\n\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H')) #just the hour in 24 hr format\n\n\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\n\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def post_create(request):
form = PostForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
user = request.POST.get('user')
title = request.POST.get('title')
content = request.POST.get('content')
PostStudent.objects.create(user=user, title=title, content=content)
messages.success(request, 'Successfully Posted')
context = {'form': form}
return render(request, 'post/create_post.html', context)
def temp_post(request):
return render(request, 'post/Posts.html', {})
<|reserved_special_token_0|>
def allpoststudents(request):
if not request.user.is_staff or request.user.is_staff:
obj = PostStudent.objects.all().order_by('-timestamp')
query = request.GET.get('q')
if query:
obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=
query) | Q(user__icontains=query) | Q(timestamp__icontains=query)
).distinct()
context = {'obj': obj}
return render(request, 'post/All_Post_Students.html', context)
<|reserved_special_token_0|>
def post_details(request, id=None):
instance = get_object_or_404(Post, id=id)
content_type = ContentType.objects.get_for_model(Post)
obj_id = instance.id
comments = Comment.objects.filter(content_type=content_type, object_id=
obj_id)
initial_data = {'content_type': content_type, 'object_id': instance.id}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(user=request.
user, content_type=content_type, object_id=obj_id, content=
content_data, parent=parent_obj)
context = {'title': instance.title, 'instance': instance, 'comments':
comments, 'form': form, 'obj_id': obj_id}
return render(request, 'post/Posts.html', context)
def post_details_student(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
content_type = ContentType.objects.get_for_model(PostStudent)
obj_id = instance.id
comments = CommentStudent.objects.filter(content_type=content_type,
object_id=obj_id)
initial_data = {'content_type': content_type, 'object_id': instance.id}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = CommentStudent.objects.get_or_create(user=
request.user, content_type=content_type, object_id=obj_id,
content=content_data, parent=parent_obj)
context = {'title': instance.title, 'instance': instance, 'comments':
comments, 'form': form, 'obj_id': obj_id}
return render(request, 'post/post_details_student.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def post_create(request):
form = PostForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
user = request.POST.get('user')
title = request.POST.get('title')
content = request.POST.get('content')
PostStudent.objects.create(user=user, title=title, content=content)
messages.success(request, 'Successfully Posted')
context = {'form': form}
return render(request, 'post/create_post.html', context)
def temp_post(request):
return render(request, 'post/Posts.html', {})
<|reserved_special_token_0|>
def allpoststudents(request):
if not request.user.is_staff or request.user.is_staff:
obj = PostStudent.objects.all().order_by('-timestamp')
query = request.GET.get('q')
if query:
obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=
query) | Q(user__icontains=query) | Q(timestamp__icontains=query)
).distinct()
context = {'obj': obj}
return render(request, 'post/All_Post_Students.html', context)
def post_update(request, id=None):
instance = get_object_or_404(Post, id=id)
form = PostForm(request.POST or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item </a>Saved", extra_tags=
'html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {'title': instance.title, 'instance': instance, 'form': form}
return render(request, 'post/create_post.html', context)
def post_details(request, id=None):
instance = get_object_or_404(Post, id=id)
content_type = ContentType.objects.get_for_model(Post)
obj_id = instance.id
comments = Comment.objects.filter(content_type=content_type, object_id=
obj_id)
initial_data = {'content_type': content_type, 'object_id': instance.id}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(user=request.
user, content_type=content_type, object_id=obj_id, content=
content_data, parent=parent_obj)
context = {'title': instance.title, 'instance': instance, 'comments':
comments, 'form': form, 'obj_id': obj_id}
return render(request, 'post/Posts.html', context)
def post_details_student(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
content_type = ContentType.objects.get_for_model(PostStudent)
obj_id = instance.id
comments = CommentStudent.objects.filter(content_type=content_type,
object_id=obj_id)
initial_data = {'content_type': content_type, 'object_id': instance.id}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = CommentStudent.objects.get_or_create(user=
request.user, content_type=content_type, object_id=obj_id,
content=content_data, parent=parent_obj)
context = {'title': instance.title, 'instance': instance, 'comments':
comments, 'form': form, 'obj_id': obj_id}
return render(request, 'post/post_details_student.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def post_create(request):
form = PostForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
user = request.POST.get('user')
title = request.POST.get('title')
content = request.POST.get('content')
PostStudent.objects.create(user=user, title=title, content=content)
messages.success(request, 'Successfully Posted')
context = {'form': form}
return render(request, 'post/create_post.html', context)
def temp_post(request):
return render(request, 'post/Posts.html', {})
<|reserved_special_token_0|>
def allpoststudents(request):
if not request.user.is_staff or request.user.is_staff:
obj = PostStudent.objects.all().order_by('-timestamp')
query = request.GET.get('q')
if query:
obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=
query) | Q(user__icontains=query) | Q(timestamp__icontains=query)
).distinct()
context = {'obj': obj}
return render(request, 'post/All_Post_Students.html', context)
def post_update(request, id=None):
instance = get_object_or_404(Post, id=id)
form = PostForm(request.POST or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item </a>Saved", extra_tags=
'html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {'title': instance.title, 'instance': instance, 'form': form}
return render(request, 'post/create_post.html', context)
def post_details(request, id=None):
instance = get_object_or_404(Post, id=id)
content_type = ContentType.objects.get_for_model(Post)
obj_id = instance.id
comments = Comment.objects.filter(content_type=content_type, object_id=
obj_id)
initial_data = {'content_type': content_type, 'object_id': instance.id}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(user=request.
user, content_type=content_type, object_id=obj_id, content=
content_data, parent=parent_obj)
context = {'title': instance.title, 'instance': instance, 'comments':
comments, 'form': form, 'obj_id': obj_id}
return render(request, 'post/Posts.html', context)
def post_details_student(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
content_type = ContentType.objects.get_for_model(PostStudent)
obj_id = instance.id
comments = CommentStudent.objects.filter(content_type=content_type,
object_id=obj_id)
initial_data = {'content_type': content_type, 'object_id': instance.id}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = CommentStudent.objects.get_or_create(user=
request.user, content_type=content_type, object_id=obj_id,
content=content_data, parent=parent_obj)
context = {'title': instance.title, 'instance': instance, 'comments':
comments, 'form': form, 'obj_id': obj_id}
return render(request, 'post/post_details_student.html', context)
def post_delete(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
instance.delete()
messages.success(request, 'Successfully deleted')
return render(request, 'post/All_Post_Students.html', {})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def post_create(request):
form = PostForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
user = request.POST.get('user')
title = request.POST.get('title')
content = request.POST.get('content')
PostStudent.objects.create(user=user, title=title, content=content)
messages.success(request, 'Successfully Posted')
context = {'form': form}
return render(request, 'post/create_post.html', context)
def temp_post(request):
return render(request, 'post/Posts.html', {})
def temp_allpost(request):
obj = Post.objects.all()
context = {'obj': obj}
return render(request, 'post/All_Post.html', context)
def allpoststudents(request):
if not request.user.is_staff or request.user.is_staff:
obj = PostStudent.objects.all().order_by('-timestamp')
query = request.GET.get('q')
if query:
obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=
query) | Q(user__icontains=query) | Q(timestamp__icontains=query)
).distinct()
context = {'obj': obj}
return render(request, 'post/All_Post_Students.html', context)
def post_update(request, id=None):
instance = get_object_or_404(Post, id=id)
form = PostForm(request.POST or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item </a>Saved", extra_tags=
'html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {'title': instance.title, 'instance': instance, 'form': form}
return render(request, 'post/create_post.html', context)
def post_details(request, id=None):
instance = get_object_or_404(Post, id=id)
content_type = ContentType.objects.get_for_model(Post)
obj_id = instance.id
comments = Comment.objects.filter(content_type=content_type, object_id=
obj_id)
initial_data = {'content_type': content_type, 'object_id': instance.id}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(user=request.
user, content_type=content_type, object_id=obj_id, content=
content_data, parent=parent_obj)
context = {'title': instance.title, 'instance': instance, 'comments':
comments, 'form': form, 'obj_id': obj_id}
return render(request, 'post/Posts.html', context)
def post_details_student(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
content_type = ContentType.objects.get_for_model(PostStudent)
obj_id = instance.id
comments = CommentStudent.objects.filter(content_type=content_type,
object_id=obj_id)
initial_data = {'content_type': content_type, 'object_id': instance.id}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = CommentStudent.objects.get_or_create(user=
request.user, content_type=content_type, object_id=obj_id,
content=content_data, parent=parent_obj)
context = {'title': instance.title, 'instance': instance, 'comments':
comments, 'form': form, 'obj_id': obj_id}
return render(request, 'post/post_details_student.html', context)
def post_delete(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
instance.delete()
messages.success(request, 'Successfully deleted')
return render(request, 'post/All_Post_Students.html', {})
<|reserved_special_token_1|>
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.contenttypes.models import ContentType
from User.forms import EditProfileForm
from User import forms
from django.db.models import Q
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from posts.forms import *
# Create your views here.
from .models import Post
from comments.models import *
from comments.forms import *
def post_create(request):
form = PostForm(request.POST or None, request.FILES or None)
if request.method == "POST":
user= request.POST.get("user")
title = request.POST.get("title")
content = request.POST.get("content")
PostStudent.objects.create(user=user, title=title,content=content)
messages.success(request, "Successfully Posted")
#if form.is_valid():
#instance = form.save(commit=False)
#instance.save()
context = {
"form": form,
}
return render(request, "post/create_post.html", context)
def temp_post(request):
return render(request, 'post/Posts.html', {})
def temp_allpost(request):
obj = Post.objects.all()
context = {'obj': obj}
return render(request, 'post/All_Post.html', context)
def allpoststudents(request):
if not request.user.is_staff or request.user.is_staff:
obj = PostStudent.objects.all().order_by("-timestamp")
query = request.GET.get("q")
if query:
obj = obj.filter(
Q(title__icontains=query)|
Q(content__icontains=query)|
Q(user__icontains=query)|
Q(timestamp__icontains=query)
).distinct()
context = {'obj': obj}
return render(request, 'post/All_Post_Students.html', context)
def post_update(request, id=None):
instance = get_object_or_404(Post, id=id)
form = PostForm(request.POST or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item </a>Saved", extra_tags='html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": instance.title,
"instance": instance,
"form": form,
}
return render(request, "post/create_post.html", context)
def post_details(request, id=None):
instance = get_object_or_404(Post, id=id)
content_type = ContentType.objects.get_for_model(Post)
obj_id = instance.id
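    # Comments attach through Django's contenttypes framework: a generic FK keyed by (content_type, object_id)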
comments = Comment.objects.filter(content_type=content_type, object_id=obj_id)
initial_data = {
"content_type": content_type,
"object_id": instance.id
}
    form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get("object_id")
content_data = form.cleaned_data.get("content")
parent_obj = None
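        # Threaded replies: a reply carries its parent comment's id in POST as "parent_id"; absent or invalid means a top-level comment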
try:
parent_id = int(request.POST.get("parent_id"))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(
user = request.user,
content_type = content_type,
object_id = obj_id,
content = content_data,
parent = parent_obj,
)
context = {
"title":instance.title,
"instance":instance,
"comments": comments,
"form": form,
"obj_id": obj_id,
}
return render(request, "post/Posts.html", context)
def post_details_student(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
content_type = ContentType.objects.get_for_model(PostStudent)
obj_id = instance.id
comments = CommentStudent.objects.filter(content_type=content_type, object_id=obj_id)
initial_data = {
"content_type": content_type,
"object_id": instance.id
}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get("object_id")
content_data = form.cleaned_data.get("content")
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = CommentStudent.objects.get_or_create(
user=request.user,
content_type=content_type,
object_id=obj_id,
content=content_data,
parent=parent_obj,
)
context = {
"title": instance.title,
"instance": instance,
"comments": comments,
"form": form,
"obj_id": obj_id,
}
return render(request, "post/post_details_student.html", context)
def post_delete(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
instance.delete()
messages.success(request, "Successfully deleted")
return render(request, 'post/All_Post_Students.html', {})
|
flexible
|
{
"blob_id": "e9fab2bb49cfda00b8cfedafab0009f691d11ec9",
"index": 9924,
"step-1": "<mask token>\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n user = request.POST.get('user')\n title = request.POST.get('title')\n content = request.POST.get('content')\n PostStudent.objects.create(user=user, title=title, content=content)\n messages.success(request, 'Successfully Posted')\n context = {'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\n<mask token>\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by('-timestamp')\n query = request.GET.get('q')\n if query:\n obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=\n query) | Q(user__icontains=query) | Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\n<mask token>\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=\n obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = Comment.objects.get_or_create(user=request.\n user, content_type=content_type, object_id=obj_id, content=\n content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/Posts.html', context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type,\n object_id=obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = CommentStudent.objects.get_or_create(user=\n request.user, content_type=content_type, object_id=obj_id,\n content=content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/post_details_student.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n user = request.POST.get('user')\n title = request.POST.get('title')\n content = request.POST.get('content')\n PostStudent.objects.create(user=user, title=title, content=content)\n messages.success(request, 'Successfully Posted')\n context = {'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\n<mask token>\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by('-timestamp')\n query = request.GET.get('q')\n if query:\n obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=\n query) | Q(user__icontains=query) | Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\ndef post_update(request, id=None):\n instance = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"<a href='#'>Item </a>Saved\", extra_tags=\n 'html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {'title': instance.title, 'instance': instance, 'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=\n obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = Comment.objects.get_or_create(user=request.\n user, content_type=content_type, object_id=obj_id, content=\n content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/Posts.html', context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type,\n object_id=obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, 
created = CommentStudent.objects.get_or_create(user=\n request.user, content_type=content_type, object_id=obj_id,\n content=content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/post_details_student.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n user = request.POST.get('user')\n title = request.POST.get('title')\n content = request.POST.get('content')\n PostStudent.objects.create(user=user, title=title, content=content)\n messages.success(request, 'Successfully Posted')\n context = {'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\n<mask token>\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by('-timestamp')\n query = request.GET.get('q')\n if query:\n obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=\n query) | Q(user__icontains=query) | Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\ndef post_update(request, id=None):\n instance = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"<a href='#'>Item </a>Saved\", extra_tags=\n 'html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {'title': instance.title, 'instance': instance, 'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=\n obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = Comment.objects.get_or_create(user=request.\n user, content_type=content_type, object_id=obj_id, content=\n content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/Posts.html', context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type,\n object_id=obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, 
created = CommentStudent.objects.get_or_create(user=\n request.user, content_type=content_type, object_id=obj_id,\n content=content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/post_details_student.html', context)\n\n\ndef post_delete(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n instance.delete()\n messages.success(request, 'Successfully deleted')\n return render(request, 'post/All_Post_Students.html', {})\n",
"step-4": "<mask token>\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n user = request.POST.get('user')\n title = request.POST.get('title')\n content = request.POST.get('content')\n PostStudent.objects.create(user=user, title=title, content=content)\n messages.success(request, 'Successfully Posted')\n context = {'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\ndef temp_allpost(request):\n obj = Post.objects.all()\n context = {'obj': obj}\n return render(request, 'post/All_Post.html', context)\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by('-timestamp')\n query = request.GET.get('q')\n if query:\n obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=\n query) | Q(user__icontains=query) | Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\ndef post_update(request, id=None):\n instance = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"<a href='#'>Item </a>Saved\", extra_tags=\n 'html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {'title': instance.title, 'instance': instance, 'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=\n obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = Comment.objects.get_or_create(user=request.\n user, content_type=content_type, object_id=obj_id, content=\n content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/Posts.html', context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type,\n object_id=obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n 
parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = CommentStudent.objects.get_or_create(user=\n request.user, content_type=content_type, object_id=obj_id,\n content=content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/post_details_student.html', context)\n\n\ndef post_delete(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n instance.delete()\n messages.success(request, 'Successfully deleted')\n return render(request, 'post/All_Post_Students.html', {})\n",
"step-5": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.contenttypes.models import ContentType\nfrom User.forms import EditProfileForm\nfrom User import forms\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\nfrom posts.forms import *\n\n\n# Create your views here.\nfrom .models import Post\nfrom comments.models import *\nfrom comments.forms import *\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == \"POST\":\n user= request.POST.get(\"user\")\n title = request.POST.get(\"title\")\n content = request.POST.get(\"content\")\n PostStudent.objects.create(user=user, title=title,content=content)\n messages.success(request, \"Successfully Posted\")\n #if form.is_valid():\n #instance = form.save(commit=False)\n #instance.save()\n context = {\n \"form\": form,\n\n }\n return render(request, \"post/create_post.html\", context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\ndef temp_allpost(request):\n obj = Post.objects.all()\n context = {'obj': obj}\n return render(request, 'post/All_Post.html', context)\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by(\"-timestamp\")\n query = request.GET.get(\"q\")\n if query:\n obj = obj.filter(\n Q(title__icontains=query)|\n Q(content__icontains=query)|\n Q(user__icontains=query)|\n Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\ndef post_update(request, id=None):\n instance = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"<a href='#'>Item </a>Saved\", extra_tags='html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"form\": form,\n }\n return render(request, \"post/create_post.html\", context)\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=obj_id)\n initial_data = {\n \"content_type\": content_type,\n \"object_id\": instance.id\n }\n form = CommentForm(request.POST or None, initial= initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get(\"content_type\")\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get(\"object_id\")\n content_data = form.cleaned_data.get(\"content\")\n parent_obj = None\n try:\n parent_id = int(request.POST.get(\"parent_id\"))\n except:\n parent_id = None\n\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n\n new_comment, created = Comment.objects.get_or_create(\n user = request.user,\n content_type = content_type,\n object_id = obj_id,\n content = content_data,\n parent = parent_obj,\n )\n\n\n\n context = {\n \"title\":instance.title,\n \"instance\":instance,\n \"comments\": comments,\n \"form\": form,\n \"obj_id\": obj_id,\n }\n return render(request, \"post/Posts.html\", context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, 
id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type, object_id=obj_id)\n initial_data = {\n \"content_type\": content_type,\n \"object_id\": instance.id\n }\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get(\"content_type\")\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get(\"object_id\")\n content_data = form.cleaned_data.get(\"content\")\n parent_obj = None\n try:\n parent_id = int(request.POST.get(\"parent_id\"))\n except:\n parent_id = None\n\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n\n new_comment, created = CommentStudent.objects.get_or_create(\n user=request.user,\n content_type=content_type,\n object_id=obj_id,\n content=content_data,\n parent=parent_obj,\n )\n\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"comments\": comments,\n \"form\": form,\n \"obj_id\": obj_id,\n }\n return render(request, \"post/post_details_student.html\", context)\n\n\ndef post_delete(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n instance.delete()\n messages.success(request, \"Successfully deleted\")\n return render(request, 'post/All_Post_Students.html', {})\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
'''
Created on 17 June, 2018
@author: [email protected] (Satish Palnati)
This class is for building the ping-progress window: text panes that log UP
and DOWN nodes, plus a progress bar and a Cancel button.
'''
import sys
from PySide.QtGui import *
from PySide.QtCore import *
from PySide import QtGui
from PySide import QtCore
class PingWindow:
wind_close_flg = False
def __init__(self,last_parent):
self.last_parent = last_parent
self.main_widget = QWidget()
self.main_widget.setMaximumHeight(400)
self.parent_layout = QVBoxLayout()
#self.last_parent.right_base_layout_v
self.ping_log_layout = QHBoxLayout()
self.progress_bar_layout = QHBoxLayout() #to incorporate the progress bar and the buttons
self.secondary_progress_layout = QVBoxLayout() #just the progress bar
self.control_button_layout = QGridLayout() #cancel,close,open valid / invalid file
# UP ip layout for ping logs
self.up_ip_layout = QVBoxLayout()
self.up_ip_btn = QtGui.QLabel("UP Nodes")
self.up_ip_btn.setFont(QtGui.QFont("Verdana", 10, QtGui.QFont.Bold))
self.up_ip_btn.setStyleSheet("background-color:white ;color:Green;border: 2px solid black")
self.up_ip_layout.addWidget(self.up_ip_btn)
self.up_ip_btn.setToolTip("Please click here to open UP NODE file.. !")
self.up_ip_btn.setStyleSheet("""QToolTip { background-color: #00bfff; color: black; border: black solid 1px }""")
self.up_ip_textbox = QPlainTextEdit()
self.up_ip_textbox.setFont(QtGui.QFont("Verdana", 10, QtGui.QFont.Bold))
self.up_ip_textbox.setStyleSheet("background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; ")
self.up_ip_textbox.setReadOnly(True)
self.up_ip_layout.addWidget(self.up_ip_textbox)
# DOWN ip layout for ping logs
self.down_ip_layout = QVBoxLayout()
self.down_ip_btn = QtGui.QLabel("DOWN Nodes")
self.down_ip_btn.setFont(QtGui.QFont("Verdana", 10, QtGui.QFont.Bold))
self.down_ip_btn.setStyleSheet("QPushButton {background-color: white ;color:Red;border: 2px solid black}")
self.down_ip_layout.addWidget(self.down_ip_btn)
self.down_ip_btn.setToolTip("Please click here to open UP NODE file.. !")
self.down_ip_btn.setStyleSheet("""QToolTip { background-color: #00bfff; color: black; border: black solid 1px }""")
self.down_ip_textbox = QPlainTextEdit()
self.down_ip_textbox.setFont(QtGui.QFont("Verdana", 10, QtGui.QFont.Bold))
self.down_ip_textbox.setStyleSheet("background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; ")
self.down_ip_textbox.setReadOnly(True)
self.down_ip_layout.addWidget(self.down_ip_textbox)
self.progress_bar_layout.addLayout(self.secondary_progress_layout)
self.progress_bar_layout.addLayout(self.control_button_layout)
self.ping_log_layout.addLayout(self.up_ip_layout)
self.ping_log_layout.addLayout(self.down_ip_layout)
self.parent_layout.addLayout(self.ping_log_layout)
self.parent_layout.addLayout(self.progress_bar_layout)
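        # Progress bar, status label and Cancel button shown while the ping job runs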
self.progressBar = QtGui.QProgressBar()
self.progressLabel = QtGui.QLabel("Ping process is in progress .... Please wait until the log file is generated...!")
self.cancel_button = QtGui.QPushButton("Cancel")
# self.progressBar.setGeometry(QtCore.QRect(100, 645, 710, 17))
self.progressBar.setProperty("Current status", 0)
self.progressBar.setObjectName("progressBar")
self.progressBar.setMaximumHeight(15)
self.progressBar.setTextVisible(True)
self.progressBar.setValue(0)
self.progressBar.setRange(0,100)
self.progressLabel.setFont(QtGui.QFont("verdana", 9, QtGui.QFont.Normal))
self.secondary_progress_layout.addWidget(self.progressBar)
self.secondary_progress_layout.addWidget(self.progressLabel)
self.progress_bar_layout.addWidget(self.cancel_button)
# self.last_parent.msgBox.information(,'Job status!',"Ping logs process has been closed.!", QtGui.QMessageBox.Ok)
self.main_widget.setLayout(self.parent_layout)
self.last_parent.right_base_layout_v.addWidget(self.main_widget)
self.main_widget.hide()
    def prepare_window(self):
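        # Clear both log panes and re-show the progress widgets before a new ping run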
self.progressBar.show()
self.progressLabel.show()
self.cancel_button.show()
self.up_ip_textbox.clear()
self.down_ip_textbox.clear()
self.main_widget.show()
    def closeEvent(self, event):
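        # Flag the window as closed; callers can check wind_close_flg to stop work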
self.wind_close_flg = True
|
normal
|
{
"blob_id": "75b1d2fb927063669a962f72deb57323001c0b7a",
"index": 5657,
"step-1": "<mask token>\n\n\nclass PingWindow:\n <mask token>\n\n def __init__(self, last_parent):\n self.last_parent = last_parent\n self.main_widget = QWidget()\n self.main_widget.setMaximumHeight(400)\n self.parent_layout = QVBoxLayout()\n self.ping_log_layout = QHBoxLayout()\n self.progress_bar_layout = QHBoxLayout()\n self.secondary_progress_layout = QVBoxLayout()\n self.control_button_layout = QGridLayout()\n self.up_ip_layout = QVBoxLayout()\n self.up_ip_btn = QtGui.QLabel('UP Nodes')\n self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.up_ip_btn.setStyleSheet(\n 'background-color:white ;color:Green;border: 2px solid black')\n self.up_ip_layout.addWidget(self.up_ip_btn)\n self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')\n self.up_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.up_ip_textbox = QPlainTextEdit()\n self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)\n )\n self.up_ip_textbox.setStyleSheet(\n 'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '\n )\n self.up_ip_textbox.setReadOnly(True)\n self.up_ip_layout.addWidget(self.up_ip_textbox)\n self.down_ip_layout = QVBoxLayout()\n self.down_ip_btn = QtGui.QLabel('DOWN Nodes')\n self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.down_ip_btn.setStyleSheet(\n 'QPushButton {background-color: white ;color:Red;border: 2px solid black}'\n )\n self.down_ip_layout.addWidget(self.down_ip_btn)\n self.down_ip_btn.setToolTip(\n 'Please click here to open UP NODE file.. !')\n self.down_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.down_ip_textbox = QPlainTextEdit()\n self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont\n .Bold))\n self.down_ip_textbox.setStyleSheet(\n 'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '\n )\n self.down_ip_textbox.setReadOnly(True)\n self.down_ip_layout.addWidget(self.down_ip_textbox)\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\n self.progress_bar_layout.addLayout(self.control_button_layout)\n self.ping_log_layout.addLayout(self.up_ip_layout)\n self.ping_log_layout.addLayout(self.down_ip_layout)\n self.parent_layout.addLayout(self.ping_log_layout)\n self.parent_layout.addLayout(self.progress_bar_layout)\n self.progressBar = QtGui.QProgressBar()\n self.progressLabel = QtGui.QLabel(\n 'Ping process is in progress .... Please wait until the log file is generated...!'\n )\n self.cancel_button = QtGui.QPushButton('Cancel')\n self.progressBar.setProperty('Current status', 0)\n self.progressBar.setObjectName('progressBar')\n self.progressBar.setMaximumHeight(15)\n self.progressBar.setTextVisible(True)\n self.progressBar.setValue(0)\n self.progressBar.setRange(0, 100)\n self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.\n Normal))\n self.secondary_progress_layout.addWidget(self.progressBar)\n self.secondary_progress_layout.addWidget(self.progressLabel)\n self.progress_bar_layout.addWidget(self.cancel_button)\n self.main_widget.setLayout(self.parent_layout)\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\n self.main_widget.hide()\n <mask token>\n\n def closeEvent(self, event):\n self.wind_close_flg = True\n",
"step-2": "<mask token>\n\n\nclass PingWindow:\n <mask token>\n\n def __init__(self, last_parent):\n self.last_parent = last_parent\n self.main_widget = QWidget()\n self.main_widget.setMaximumHeight(400)\n self.parent_layout = QVBoxLayout()\n self.ping_log_layout = QHBoxLayout()\n self.progress_bar_layout = QHBoxLayout()\n self.secondary_progress_layout = QVBoxLayout()\n self.control_button_layout = QGridLayout()\n self.up_ip_layout = QVBoxLayout()\n self.up_ip_btn = QtGui.QLabel('UP Nodes')\n self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.up_ip_btn.setStyleSheet(\n 'background-color:white ;color:Green;border: 2px solid black')\n self.up_ip_layout.addWidget(self.up_ip_btn)\n self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')\n self.up_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.up_ip_textbox = QPlainTextEdit()\n self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)\n )\n self.up_ip_textbox.setStyleSheet(\n 'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '\n )\n self.up_ip_textbox.setReadOnly(True)\n self.up_ip_layout.addWidget(self.up_ip_textbox)\n self.down_ip_layout = QVBoxLayout()\n self.down_ip_btn = QtGui.QLabel('DOWN Nodes')\n self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.down_ip_btn.setStyleSheet(\n 'QPushButton {background-color: white ;color:Red;border: 2px solid black}'\n )\n self.down_ip_layout.addWidget(self.down_ip_btn)\n self.down_ip_btn.setToolTip(\n 'Please click here to open UP NODE file.. !')\n self.down_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.down_ip_textbox = QPlainTextEdit()\n self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont\n .Bold))\n self.down_ip_textbox.setStyleSheet(\n 'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '\n )\n self.down_ip_textbox.setReadOnly(True)\n self.down_ip_layout.addWidget(self.down_ip_textbox)\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\n self.progress_bar_layout.addLayout(self.control_button_layout)\n self.ping_log_layout.addLayout(self.up_ip_layout)\n self.ping_log_layout.addLayout(self.down_ip_layout)\n self.parent_layout.addLayout(self.ping_log_layout)\n self.parent_layout.addLayout(self.progress_bar_layout)\n self.progressBar = QtGui.QProgressBar()\n self.progressLabel = QtGui.QLabel(\n 'Ping process is in progress .... 
Please wait until the log file is generated...!'\n )\n self.cancel_button = QtGui.QPushButton('Cancel')\n self.progressBar.setProperty('Current status', 0)\n self.progressBar.setObjectName('progressBar')\n self.progressBar.setMaximumHeight(15)\n self.progressBar.setTextVisible(True)\n self.progressBar.setValue(0)\n self.progressBar.setRange(0, 100)\n self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.\n Normal))\n self.secondary_progress_layout.addWidget(self.progressBar)\n self.secondary_progress_layout.addWidget(self.progressLabel)\n self.progress_bar_layout.addWidget(self.cancel_button)\n self.main_widget.setLayout(self.parent_layout)\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\n self.main_widget.hide()\n\n def prepare_window(self):\n self.progressBar.show()\n self.progressLabel.show()\n self.cancel_button.show()\n self.up_ip_textbox.clear()\n self.down_ip_textbox.clear()\n self.main_widget.show()\n\n def closeEvent(self, event):\n self.wind_close_flg = True\n",
"step-3": "<mask token>\n\n\nclass PingWindow:\n wind_close_flg = False\n\n def __init__(self, last_parent):\n self.last_parent = last_parent\n self.main_widget = QWidget()\n self.main_widget.setMaximumHeight(400)\n self.parent_layout = QVBoxLayout()\n self.ping_log_layout = QHBoxLayout()\n self.progress_bar_layout = QHBoxLayout()\n self.secondary_progress_layout = QVBoxLayout()\n self.control_button_layout = QGridLayout()\n self.up_ip_layout = QVBoxLayout()\n self.up_ip_btn = QtGui.QLabel('UP Nodes')\n self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.up_ip_btn.setStyleSheet(\n 'background-color:white ;color:Green;border: 2px solid black')\n self.up_ip_layout.addWidget(self.up_ip_btn)\n self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')\n self.up_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.up_ip_textbox = QPlainTextEdit()\n self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)\n )\n self.up_ip_textbox.setStyleSheet(\n 'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '\n )\n self.up_ip_textbox.setReadOnly(True)\n self.up_ip_layout.addWidget(self.up_ip_textbox)\n self.down_ip_layout = QVBoxLayout()\n self.down_ip_btn = QtGui.QLabel('DOWN Nodes')\n self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.down_ip_btn.setStyleSheet(\n 'QPushButton {background-color: white ;color:Red;border: 2px solid black}'\n )\n self.down_ip_layout.addWidget(self.down_ip_btn)\n self.down_ip_btn.setToolTip(\n 'Please click here to open UP NODE file.. !')\n self.down_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.down_ip_textbox = QPlainTextEdit()\n self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont\n .Bold))\n self.down_ip_textbox.setStyleSheet(\n 'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '\n )\n self.down_ip_textbox.setReadOnly(True)\n self.down_ip_layout.addWidget(self.down_ip_textbox)\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\n self.progress_bar_layout.addLayout(self.control_button_layout)\n self.ping_log_layout.addLayout(self.up_ip_layout)\n self.ping_log_layout.addLayout(self.down_ip_layout)\n self.parent_layout.addLayout(self.ping_log_layout)\n self.parent_layout.addLayout(self.progress_bar_layout)\n self.progressBar = QtGui.QProgressBar()\n self.progressLabel = QtGui.QLabel(\n 'Ping process is in progress .... 
Please wait until the log file is generated...!'\n )\n self.cancel_button = QtGui.QPushButton('Cancel')\n self.progressBar.setProperty('Current status', 0)\n self.progressBar.setObjectName('progressBar')\n self.progressBar.setMaximumHeight(15)\n self.progressBar.setTextVisible(True)\n self.progressBar.setValue(0)\n self.progressBar.setRange(0, 100)\n self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.\n Normal))\n self.secondary_progress_layout.addWidget(self.progressBar)\n self.secondary_progress_layout.addWidget(self.progressLabel)\n self.progress_bar_layout.addWidget(self.cancel_button)\n self.main_widget.setLayout(self.parent_layout)\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\n self.main_widget.hide()\n\n def prepare_window(self):\n self.progressBar.show()\n self.progressLabel.show()\n self.cancel_button.show()\n self.up_ip_textbox.clear()\n self.down_ip_textbox.clear()\n self.main_widget.show()\n\n def closeEvent(self, event):\n self.wind_close_flg = True\n",
"step-4": "<mask token>\nimport sys\nfrom PySide.QtGui import *\nfrom PySide.QtCore import *\nfrom PySide import QtGui\nfrom PySide import QtCore\n\n\nclass PingWindow:\n wind_close_flg = False\n\n def __init__(self, last_parent):\n self.last_parent = last_parent\n self.main_widget = QWidget()\n self.main_widget.setMaximumHeight(400)\n self.parent_layout = QVBoxLayout()\n self.ping_log_layout = QHBoxLayout()\n self.progress_bar_layout = QHBoxLayout()\n self.secondary_progress_layout = QVBoxLayout()\n self.control_button_layout = QGridLayout()\n self.up_ip_layout = QVBoxLayout()\n self.up_ip_btn = QtGui.QLabel('UP Nodes')\n self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.up_ip_btn.setStyleSheet(\n 'background-color:white ;color:Green;border: 2px solid black')\n self.up_ip_layout.addWidget(self.up_ip_btn)\n self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')\n self.up_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.up_ip_textbox = QPlainTextEdit()\n self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)\n )\n self.up_ip_textbox.setStyleSheet(\n 'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '\n )\n self.up_ip_textbox.setReadOnly(True)\n self.up_ip_layout.addWidget(self.up_ip_textbox)\n self.down_ip_layout = QVBoxLayout()\n self.down_ip_btn = QtGui.QLabel('DOWN Nodes')\n self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.down_ip_btn.setStyleSheet(\n 'QPushButton {background-color: white ;color:Red;border: 2px solid black}'\n )\n self.down_ip_layout.addWidget(self.down_ip_btn)\n self.down_ip_btn.setToolTip(\n 'Please click here to open UP NODE file.. !')\n self.down_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.down_ip_textbox = QPlainTextEdit()\n self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont\n .Bold))\n self.down_ip_textbox.setStyleSheet(\n 'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '\n )\n self.down_ip_textbox.setReadOnly(True)\n self.down_ip_layout.addWidget(self.down_ip_textbox)\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\n self.progress_bar_layout.addLayout(self.control_button_layout)\n self.ping_log_layout.addLayout(self.up_ip_layout)\n self.ping_log_layout.addLayout(self.down_ip_layout)\n self.parent_layout.addLayout(self.ping_log_layout)\n self.parent_layout.addLayout(self.progress_bar_layout)\n self.progressBar = QtGui.QProgressBar()\n self.progressLabel = QtGui.QLabel(\n 'Ping process is in progress .... 
Please wait until the log file is generated...!'\n )\n self.cancel_button = QtGui.QPushButton('Cancel')\n self.progressBar.setProperty('Current status', 0)\n self.progressBar.setObjectName('progressBar')\n self.progressBar.setMaximumHeight(15)\n self.progressBar.setTextVisible(True)\n self.progressBar.setValue(0)\n self.progressBar.setRange(0, 100)\n self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.\n Normal))\n self.secondary_progress_layout.addWidget(self.progressBar)\n self.secondary_progress_layout.addWidget(self.progressLabel)\n self.progress_bar_layout.addWidget(self.cancel_button)\n self.main_widget.setLayout(self.parent_layout)\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\n self.main_widget.hide()\n\n def prepare_window(self):\n self.progressBar.show()\n self.progressLabel.show()\n self.cancel_button.show()\n self.up_ip_textbox.clear()\n self.down_ip_textbox.clear()\n self.main_widget.show()\n\n def closeEvent(self, event):\n self.wind_close_flg = True\n",
"step-5": "'''\r\nCreated on 17 june, 2018\r\n@author: [email protected] (Satish Palnati)\r\nThis class is for \r\n\r\n'''\r\nimport sys\r\nfrom PySide.QtGui import * \r\nfrom PySide.QtCore import *\r\nfrom PySide import QtGui\r\nfrom PySide import QtCore\r\n\r\nclass PingWindow:\r\n\r\n wind_close_flg = False\r\n\r\n def __init__(self,last_parent):\r\n \r\n self.last_parent = last_parent\r\n\r\n self.main_widget = QWidget()\r\n self.main_widget.setMaximumHeight(400)\r\n \r\n self.parent_layout = QVBoxLayout()\r\n #self.last_parent.right_base_layout_v\r\n\r\n \r\n self.ping_log_layout = QHBoxLayout()\r\n \r\n self.progress_bar_layout = QHBoxLayout() #to incorporate the progress bar and the buttons\r\n \r\n self.secondary_progress_layout = QVBoxLayout() #just the progress bar\r\n \r\n self.control_button_layout = QGridLayout() #cancel,close,open valid / invalid file\r\n \r\n \r\n # UP ip layout for ping logs \r\n self.up_ip_layout = QVBoxLayout()\r\n \r\n self.up_ip_btn = QtGui.QLabel(\"UP Nodes\")\r\n self.up_ip_btn.setFont(QtGui.QFont(\"Verdana\", 10, QtGui.QFont.Bold))\r\n self.up_ip_btn.setStyleSheet(\"background-color:white ;color:Green;border: 2px solid black\")\r\n self.up_ip_layout.addWidget(self.up_ip_btn)\r\n \r\n self.up_ip_btn.setToolTip(\"Please click here to open UP NODE file.. !\")\r\n self.up_ip_btn.setStyleSheet(\"\"\"QToolTip { background-color: #00bfff; color: black; border: black solid 1px }\"\"\")\r\n\r\n self.up_ip_textbox = QPlainTextEdit()\r\n self.up_ip_textbox.setFont(QtGui.QFont(\"Verdana\", 10, QtGui.QFont.Bold))\r\n self.up_ip_textbox.setStyleSheet(\"background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; \")\r\n self.up_ip_textbox.setReadOnly(True)\r\n self.up_ip_layout.addWidget(self.up_ip_textbox)\r\n \r\n # DOWN ip layout for ping logs\r\n self.down_ip_layout = QVBoxLayout()\r\n \r\n self.down_ip_btn = QtGui.QLabel(\"DOWN Nodes\")\r\n self.down_ip_btn.setFont(QtGui.QFont(\"Verdana\", 10, QtGui.QFont.Bold))\r\n self.down_ip_btn.setStyleSheet(\"QPushButton {background-color: white ;color:Red;border: 2px solid black}\")\r\n self.down_ip_layout.addWidget(self.down_ip_btn)\r\n \r\n self.down_ip_btn.setToolTip(\"Please click here to open UP NODE file.. !\")\r\n self.down_ip_btn.setStyleSheet(\"\"\"QToolTip { background-color: #00bfff; color: black; border: black solid 1px }\"\"\")\r\n \r\n self.down_ip_textbox = QPlainTextEdit()\r\n self.down_ip_textbox.setFont(QtGui.QFont(\"Verdana\", 10, QtGui.QFont.Bold))\r\n self.down_ip_textbox.setStyleSheet(\"background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; \")\r\n self.down_ip_textbox.setReadOnly(True)\r\n self.down_ip_layout.addWidget(self.down_ip_textbox)\r\n \r\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\r\n self.progress_bar_layout.addLayout(self.control_button_layout)\r\n \r\n self.ping_log_layout.addLayout(self.up_ip_layout)\r\n self.ping_log_layout.addLayout(self.down_ip_layout)\r\n \r\n self.parent_layout.addLayout(self.ping_log_layout)\r\n self.parent_layout.addLayout(self.progress_bar_layout)\r\n \r\n \r\n self.progressBar = QtGui.QProgressBar()\r\n self.progressLabel = QtGui.QLabel(\"Ping process is in progress .... 
Please wait until the log file is generated...!\")\r\n self.cancel_button = QtGui.QPushButton(\"Cancel\")\r\n \r\n# self.progressBar.setGeometry(QtCore.QRect(100, 645, 710, 17))\r\n self.progressBar.setProperty(\"Current status\", 0)\r\n self.progressBar.setObjectName(\"progressBar\")\r\n self.progressBar.setMaximumHeight(15)\r\n self.progressBar.setTextVisible(True)\r\n self.progressBar.setValue(0)\r\n self.progressBar.setRange(0,100)\r\n \r\n \r\n self.progressLabel.setFont(QtGui.QFont(\"verdana\", 9, QtGui.QFont.Normal))\r\n \r\n self.secondary_progress_layout.addWidget(self.progressBar)\r\n self.secondary_progress_layout.addWidget(self.progressLabel)\r\n self.progress_bar_layout.addWidget(self.cancel_button)\r\n # self.last_parent.msgBox.information(,'Job status!',\"Ping logs process has been closed.!\", QtGui.QMessageBox.Ok)\r\n \r\n self.main_widget.setLayout(self.parent_layout) \r\n \r\n \r\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\r\n \r\n self.main_widget.hide()\r\n \r\n \r\n def prepare_window(self,):\r\n \r\n self.progressBar.show()\r\n self.progressLabel.show()\r\n self.cancel_button.show()\r\n self.up_ip_textbox.clear()\r\n self.down_ip_textbox.clear()\r\n self.main_widget.show()\r\n \r\n def closeEvent(self,event):\r\n \r\n \r\n self.wind_close_flg = True\r\n \r\n \r\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""Copied from http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt"""
STOP_WORDS = set(
"""
あそこ
あたり
あちら
あっち
あと
あな
あなた
あれ
いくつ
いつ
いま
いや
いろいろ
うち
おおまか
おまえ
おれ
がい
かく
かたち
かやの
から
がら
きた
くせ
ここ
こっち
こと
ごと
こちら
ごっちゃ
これ
これら
ごろ
さまざま
さらい
さん
しかた
しよう
すか
ずつ
すね
すべて
ぜんぶ
そう
そこ
そちら
そっち
そで
それ
それぞれ
それなり
たくさん
たち
たび
ため
だめ
ちゃ
ちゃん
てん
とおり
とき
どこ
どこか
ところ
どちら
どっか
どっち
どれ
なか
なかば
なに
など
なん
はじめ
はず
はるか
ひと
ひとつ
ふく
ぶり
べつ
へん
ぺん
ほう
ほか
まさ
まし
まとも
まま
みたい
みつ
みなさん
みんな
もと
もの
もん
やつ
よう
よそ
わけ
わたし
ハイ
上
中
下
字
年
月
日
時
分
秒
週
火
水
木
金
土
国
都
道
府
県
市
区
町
村
各
第
方
何
的
度
文
者
性
体
人
他
今
部
課
係
外
類
達
気
室
口
誰
用
界
会
首
男
女
別
話
私
屋
店
家
場
等
見
際
観
段
略
例
系
論
形
間
地
員
線
点
書
品
力
法
感
作
元
手
数
彼
彼女
子
内
楽
喜
怒
哀
輪
頃
化
境
俺
奴
高
校
婦
伸
紀
誌
レ
行
列
事
士
台
集
様
所
歴
器
名
情
連
毎
式
簿
回
匹
個
席
束
歳
目
通
面
円
玉
枚
前
後
左
右
次
先
春
夏
秋
冬
一
二
三
四
五
六
七
八
九
十
百
千
万
億
兆
下記
上記
時間
今回
前回
場合
一つ
年生
自分
ヶ所
ヵ所
カ所
箇所
ヶ月
ヵ月
カ月
箇月
名前
本当
確か
時点
全部
関係
近く
方法
我々
違い
多く
扱い
新た
その後
半ば
結局
様々
以前
以後
以降
未満
以上
以下
幾つ
毎日
自体
向こう
何人
手段
同じ
感じ
""".split()
)
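
# A minimal usage sketch (the token list and tokenizer are hypothetical;
# any iterable of surface forms works):
#
#   tokens = ["私", "は", "東京", "へ", "行く"]
#   content_tokens = [t for t in tokens if t not in STOP_WORDS]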
|
normal
|
{
"blob_id": "254afebcc909c805d1e4972a0910eb4451d1e64e",
"index": 8704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSTOP_WORDS = set(\n \"\"\"\nあそこ\nあたり\nあちら\nあっち\nあと\nあな\nあなた\nあれ\nいくつ\nいつ\nいま\nいや\nいろいろ\nうち\nおおまか\nおまえ\nおれ\nがい\nかく\nかたち\nかやの\nから\nがら\nきた\nくせ\nここ\nこっち\nこと\nごと\nこちら\nごっちゃ\nこれ\nこれら\nごろ\nさまざま\nさらい\nさん\nしかた\nしよう\nすか\nずつ\nすね\nすべて\nぜんぶ\nそう\nそこ\nそちら\nそっち\nそで\nそれ\nそれぞれ\nそれなり\nたくさん\nたち\nたび\nため\nだめ\nちゃ\nちゃん\nてん\nとおり\nとき\nどこ\nどこか\nところ\nどちら\nどっか\nどっち\nどれ\nなか\nなかば\nなに\nなど\nなん\nはじめ\nはず\nはるか\nひと\nひとつ\nふく\nぶり\nべつ\nへん\nぺん\nほう\nほか\nまさ\nまし\nまとも\nまま\nみたい\nみつ\nみなさん\nみんな\nもと\nもの\nもん\nやつ\nよう\nよそ\nわけ\nわたし\nハイ\n上\n中\n下\n字\n年\n月\n日\n時\n分\n秒\n週\n火\n水\n木\n金\n土\n国\n都\n道\n府\n県\n市\n区\n町\n村\n\n\n各\n第\n方\n何\n的\n度\n文\n者\n性\n体\n人\n他\n今\n部\n課\n係\n外\n類\n達\n気\n室\n口\n誰\n用\n界\n会\n首\n男\n女\n別\n話\n私\n屋\n店\n家\n場\n等\n見\n際\n観\n段\n略\n例\n系\n論\n形\n間\n地\n員\n線\n点\n書\n品\n力\n法\n感\n作\n元\n手\n数\n彼\n彼女\n子\n内\n楽\n喜\n怒\n哀\n輪\n頃\n化\n境\n俺\n奴\n高\n校\n婦\n伸\n紀\n誌\nレ\n行\n列\n事\n士\n台\n集\n様\n所\n歴\n器\n名\n情\n連\n毎\n式\n簿\n\n\n\n\n回\n匹\n個\n席\n束\n歳\n目\n通\n面\n円\n玉\n枚\n\n前\n後\n左\n右\n次\n先\n\n春\n夏\n秋\n冬\n\n\n\n一\n二\n三\n四\n五\n六\n七\n八\n九\n十\n百\n千\n万\n億\n兆\n\n\n下記\n上記\n時間\n今回\n前回\n場合\n一つ\n年生\n自分\nヶ所\nヵ所\nカ所\n箇所\nヶ月\nヵ月\nカ月\n箇月\n名前\n本当\n確か\n時点\n全部\n関係\n近く\n方法\n我々\n違い\n多く\n扱い\n新た\nその後\n半ば\n結局\n様々\n以前\n以後\n以降\n未満\n以上\n以下\n幾つ\n毎日\n自体\n向こう\n何人\n手段\n同じ\n感じ\n\"\"\"\n .split())\n",
"step-3": "\"\"\"Copied from http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt\"\"\"\nSTOP_WORDS = set(\n \"\"\"\nあそこ\nあたり\nあちら\nあっち\nあと\nあな\nあなた\nあれ\nいくつ\nいつ\nいま\nいや\nいろいろ\nうち\nおおまか\nおまえ\nおれ\nがい\nかく\nかたち\nかやの\nから\nがら\nきた\nくせ\nここ\nこっち\nこと\nごと\nこちら\nごっちゃ\nこれ\nこれら\nごろ\nさまざま\nさらい\nさん\nしかた\nしよう\nすか\nずつ\nすね\nすべて\nぜんぶ\nそう\nそこ\nそちら\nそっち\nそで\nそれ\nそれぞれ\nそれなり\nたくさん\nたち\nたび\nため\nだめ\nちゃ\nちゃん\nてん\nとおり\nとき\nどこ\nどこか\nところ\nどちら\nどっか\nどっち\nどれ\nなか\nなかば\nなに\nなど\nなん\nはじめ\nはず\nはるか\nひと\nひとつ\nふく\nぶり\nべつ\nへん\nぺん\nほう\nほか\nまさ\nまし\nまとも\nまま\nみたい\nみつ\nみなさん\nみんな\nもと\nもの\nもん\nやつ\nよう\nよそ\nわけ\nわたし\nハイ\n上\n中\n下\n字\n年\n月\n日\n時\n分\n秒\n週\n火\n水\n木\n金\n土\n国\n都\n道\n府\n県\n市\n区\n町\n村\n\n\n各\n第\n方\n何\n的\n度\n文\n者\n性\n体\n人\n他\n今\n部\n課\n係\n外\n類\n達\n気\n室\n口\n誰\n用\n界\n会\n首\n男\n女\n別\n話\n私\n屋\n店\n家\n場\n等\n見\n際\n観\n段\n略\n例\n系\n論\n形\n間\n地\n員\n線\n点\n書\n品\n力\n法\n感\n作\n元\n手\n数\n彼\n彼女\n子\n内\n楽\n喜\n怒\n哀\n輪\n頃\n化\n境\n俺\n奴\n高\n校\n婦\n伸\n紀\n誌\nレ\n行\n列\n事\n士\n台\n集\n様\n所\n歴\n器\n名\n情\n連\n毎\n式\n簿\n\n\n\n\n回\n匹\n個\n席\n束\n歳\n目\n通\n面\n円\n玉\n枚\n\n前\n後\n左\n右\n次\n先\n\n春\n夏\n秋\n冬\n\n\n\n一\n二\n三\n四\n五\n六\n七\n八\n九\n十\n百\n千\n万\n億\n兆\n\n\n下記\n上記\n時間\n今回\n前回\n場合\n一つ\n年生\n自分\nヶ所\nヵ所\nカ所\n箇所\nヶ月\nヵ月\nカ月\n箇月\n名前\n本当\n確か\n時点\n全部\n関係\n近く\n方法\n我々\n違い\n多く\n扱い\n新た\nその後\n半ば\n結局\n様々\n以前\n以後\n以降\n未満\n以上\n以下\n幾つ\n毎日\n自体\n向こう\n何人\n手段\n同じ\n感じ\n\"\"\".split()\n)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# For each test case, decide whether the string reads as a palindrome.
# '*' acts as a wildcard: as soon as one is seen, the scan stops with the
# flag j still set, so the answer stays "Exist".
for t in range(int(input())):
    st = list(input())
    N, j = len(st), 1  # j == 1 means "palindrome still possible"
    for i in range(N // 2):
        if st[i] == '*' or st[-i - 1] == '*':
            break
        elif st[i] != st[-i - 1]:
            j = 0
            break
    print('#{} Exist'.format(t + 1) if j else '#{} Not exist'.format(t + 1))
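
# Sample session (judge-style input is hypothetical; the first line is the
# number of test cases):
#
#   input : 2
#           a*cda
#           abcda
#   output: #1 Exist
#           #2 Not exist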
|
flexible
|
{
"blob_id": "21d499555b4bc4944996a57ae544a56aa317b00b",
"index": 4386,
"step-1": "<mask token>\n",
"step-2": "for t in range(int(input())):\n st = list(input())\n N, j = len(st), 1\n for i in range(N // 2):\n if st[i] == '*' or st[-i - 1] == '*':\n break\n elif st[i] != st[-i - 1]:\n j = 0\n break\n print('#{} Exist'.format(t + 1)) if j else print('#{} Not exist'.format\n (t + 1))\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
def html_print(text, title=''):
    from IPython.core.display import display, HTML

    # create title for the content
    display(HTML("<h4>" + str(title) + "</h4>"))

    # create content; note that display() returns None by default, so
    # `html` is always None and the return value is only a call marker
    html = display(HTML("<font size=2 face=Verdana>" + text + "</font>"))

    return html
normal
|
{
"blob_id": "84a63f60a45f1f8fc1efec8f30345a43c3c30c63",
"index": 7332,
"step-1": "<mask token>\n",
"step-2": "def html_print(text, title=''):\n from IPython.core.display import display, HTML\n display(HTML('<h4>' + str(title) + '</h4>'))\n html = display(HTML('<font size=2 face=Verdana>' + text + '</font>'))\n return html\n",
"step-3": "def html_print(text, title=''):\n\n from IPython.core.display import display, HTML\n\n # create title for the content\n display(HTML(\"<h4>\" + str(title) + \"</h4>\"))\n\n # create content\n html = display(HTML(\"<font size=2 face=Verdana>\" + text + \"</font>\"))\n\n return html\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from functools import reduce
def element_wise_op(x, operation):
    # Apply `operation` to every element of x in place.
    for i in np.nditer(x, op_flags=['readwrite']):
        i[...] = operation(i)


class RecurrentLayer(object):
    def __init__(self, input_dim, state_dim, activator, learning_rate):
        self.input_dim = input_dim
        self.state_dim = state_dim
        self.activator = activator
        self.learning_rate = learning_rate
        self.time = 0
        # State series; index 0 is the zero state at time 0.
        self.state_list = [np.zeros((state_dim, 1))]
        self.W = np.random.uniform(-1e-3, 1e-3, (state_dim, state_dim))
        self.U = np.random.uniform(-1e-3, 1e-3, (state_dim, input_dim))

    def forward(self, input_vec):
        self.time += 1
        state = np.dot(self.U, input_vec) + np.dot(self.W, self.state_list[-1])
        element_wise_op(state, self.activator.forward)
        self.state_list.append(state)

    def bptt(self, sensitivity_array, activator):
        self.calcu_delta(sensitivity_array, activator)
        self.calcu_grad()

    def calcu_delta(self, sensitivity_array, activator):
        # delta_list[t] holds the error term of the state at time t;
        # the final entry is the sensitivity arriving from the layer above.
        self.delta_list = []
        for i in range(self.time):
            self.delta_list.append(np.zeros((self.state_dim, 1)))
        self.delta_list.append(sensitivity_array)
        for k in range(self.time - 1, 0, -1):
            self.calcu_delta_k(k, activator)

    def calcu_delta_k(self, k, activator):
        # One step of BPTT: delta_k = (delta_{k+1}^T . W . diag(f'(s_{k+1})))^T
        state = self.state_list[k + 1].copy()
        element_wise_op(state, activator.backward)
        self.delta_list[k] = np.dot(
            np.dot(self.delta_list[k + 1].T, self.W),
            np.diag(state[:, 0])).T

    def calcu_grad(self):
        self.grad_list = []
        for t in range(self.time + 1):
            self.grad_list.append(np.zeros((self.state_dim, self.state_dim)))
        for t in range(self.time, 0, -1):
            self.calcu_grad_t(t)
        # The gradient of W is the sum of the per-timestep contributions
        # (grad_list[0] is the all-zeros initializer).
        self.grad = reduce(lambda a, b: a + b, self.grad_list, self.grad_list[0])

    def calcu_grad_t(self, t):
        # Contribution at time t: delta_t . s_{t-1}^T
        grad = np.dot(self.delta_list[t], self.state_list[t - 1].T)
        self.grad_list[t] = grad

    def bpttupdate(self):
        self.W -= self.grad * self.learning_rate
normal
|
{
"blob_id": "b34ce3ac87a01b8e80abc3fde1c91638f2896610",
"index": 2392,
"step-1": "<mask token>\n\n\nclass RecurrentLayer(object):\n <mask token>\n\n def forward(self, input_vec):\n self.time += 1\n state = np.dot(self.U, input_vec) + np.dot(self.W, self.state_list[-1])\n element_wise_op(state, self.activator.forward)\n self.state_list.append(state)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def bpttupdate(self):\n self.W -= self.grad * self.learning_rate\n",
"step-2": "<mask token>\n\n\nclass RecurrentLayer(object):\n\n def __init__(self, input_dim, state_dim, activator, learning_rate):\n self.input_dim = input_dim\n self.state_dim = state_dim\n self.activator = activator\n self.learning_rate = learning_rate\n self.time = 0\n self.state_list = np.zeros((state_dim, 1))\n self.W = np.random.uniform(-0.001, 0.001, (state_dim, state_dim))\n self.U = np.random.uniform(-0.001, 0.001, (state_dim, input_dim))\n\n def forward(self, input_vec):\n self.time += 1\n state = np.dot(self.U, input_vec) + np.dot(self.W, self.state_list[-1])\n element_wise_op(state, self.activator.forward)\n self.state_list.append(state)\n <mask token>\n\n def calcu_delta(self, sensitivity_array, activator):\n self.delta_list = []\n for i in range(self.time):\n self.delta_list.append(np.zeros(self.state_dim, 1))\n self.delta_list.append(sensitivity_array)\n for k in range(self.time - 1, 0, -1):\n self.calcu_delta_k(k, activator)\n\n def calcu_delta_k(self, k, activator):\n state = self.state_list[k + 1].copy()\n element_wise_op(self.state_list[k + 1], activator.backward)\n self.state_list[k] = np.dot(np.dot(self.state_list[k + 1].T, self.W\n ), np.diag(state[:, 0])).T\n\n def calcu_grad(self):\n self.grad_list = []\n for t in range(self.time + 1):\n self.grad_list.append(np.zeros((self.state_dim, self.state_dim)))\n for t in range(self.time, 0, -1):\n self.calcu_grad_t(t)\n self.grad = reduce(lambda a, b: a + b, self.grad_list, self.grad)\n\n def calcu_grad_t(self, t):\n grad = np.dot(self.delta_list[t], self.delta_list[t - 1].T)\n self.grad_list[t] = grad\n\n def bpttupdate(self):\n self.W -= self.grad * self.learning_rate\n",
"step-3": "<mask token>\n\n\nclass RecurrentLayer(object):\n\n def __init__(self, input_dim, state_dim, activator, learning_rate):\n self.input_dim = input_dim\n self.state_dim = state_dim\n self.activator = activator\n self.learning_rate = learning_rate\n self.time = 0\n self.state_list = np.zeros((state_dim, 1))\n self.W = np.random.uniform(-0.001, 0.001, (state_dim, state_dim))\n self.U = np.random.uniform(-0.001, 0.001, (state_dim, input_dim))\n\n def forward(self, input_vec):\n self.time += 1\n state = np.dot(self.U, input_vec) + np.dot(self.W, self.state_list[-1])\n element_wise_op(state, self.activator.forward)\n self.state_list.append(state)\n\n def bptt(self, sensitivity_array, activator):\n self.calcu_delta(sensitivity_array, activator)\n self.calcu_grad()\n\n def calcu_delta(self, sensitivity_array, activator):\n self.delta_list = []\n for i in range(self.time):\n self.delta_list.append(np.zeros(self.state_dim, 1))\n self.delta_list.append(sensitivity_array)\n for k in range(self.time - 1, 0, -1):\n self.calcu_delta_k(k, activator)\n\n def calcu_delta_k(self, k, activator):\n state = self.state_list[k + 1].copy()\n element_wise_op(self.state_list[k + 1], activator.backward)\n self.state_list[k] = np.dot(np.dot(self.state_list[k + 1].T, self.W\n ), np.diag(state[:, 0])).T\n\n def calcu_grad(self):\n self.grad_list = []\n for t in range(self.time + 1):\n self.grad_list.append(np.zeros((self.state_dim, self.state_dim)))\n for t in range(self.time, 0, -1):\n self.calcu_grad_t(t)\n self.grad = reduce(lambda a, b: a + b, self.grad_list, self.grad)\n\n def calcu_grad_t(self, t):\n grad = np.dot(self.delta_list[t], self.delta_list[t - 1].T)\n self.grad_list[t] = grad\n\n def bpttupdate(self):\n self.W -= self.grad * self.learning_rate\n",
"step-4": "<mask token>\n\n\ndef element_wise_op(x, operation):\n for i in np.nditer(x, op_flags=['readwrite']):\n i[...] = operation[i]\n\n\nclass RecurrentLayer(object):\n\n def __init__(self, input_dim, state_dim, activator, learning_rate):\n self.input_dim = input_dim\n self.state_dim = state_dim\n self.activator = activator\n self.learning_rate = learning_rate\n self.time = 0\n self.state_list = np.zeros((state_dim, 1))\n self.W = np.random.uniform(-0.001, 0.001, (state_dim, state_dim))\n self.U = np.random.uniform(-0.001, 0.001, (state_dim, input_dim))\n\n def forward(self, input_vec):\n self.time += 1\n state = np.dot(self.U, input_vec) + np.dot(self.W, self.state_list[-1])\n element_wise_op(state, self.activator.forward)\n self.state_list.append(state)\n\n def bptt(self, sensitivity_array, activator):\n self.calcu_delta(sensitivity_array, activator)\n self.calcu_grad()\n\n def calcu_delta(self, sensitivity_array, activator):\n self.delta_list = []\n for i in range(self.time):\n self.delta_list.append(np.zeros(self.state_dim, 1))\n self.delta_list.append(sensitivity_array)\n for k in range(self.time - 1, 0, -1):\n self.calcu_delta_k(k, activator)\n\n def calcu_delta_k(self, k, activator):\n state = self.state_list[k + 1].copy()\n element_wise_op(self.state_list[k + 1], activator.backward)\n self.state_list[k] = np.dot(np.dot(self.state_list[k + 1].T, self.W\n ), np.diag(state[:, 0])).T\n\n def calcu_grad(self):\n self.grad_list = []\n for t in range(self.time + 1):\n self.grad_list.append(np.zeros((self.state_dim, self.state_dim)))\n for t in range(self.time, 0, -1):\n self.calcu_grad_t(t)\n self.grad = reduce(lambda a, b: a + b, self.grad_list, self.grad)\n\n def calcu_grad_t(self, t):\n grad = np.dot(self.delta_list[t], self.delta_list[t - 1].T)\n self.grad_list[t] = grad\n\n def bpttupdate(self):\n self.W -= self.grad * self.learning_rate\n",
"step-5": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport numpy as np\nfrom functools import reduce\n\ndef element_wise_op(x, operation):\n for i in np.nditer(x, op_flags=['readwrite']):\n i[...] = operation[i]\n\nclass RecurrentLayer(object):\n def __init__(self, input_dim, state_dim, activator, learning_rate):\n self.input_dim = input_dim\n self.state_dim = state_dim\n self.activator = activator\n self.learning_rate = learning_rate\n self.time = 0\n self.state_list = np.zeros((state_dim, 1)) #Initialization of state series in time 0\n self.W = np.random.uniform(-1e-3, 1e-3, (state_dim, state_dim))\n self.U = np.random.uniform(-1e-3, 1e-3, (state_dim, input_dim))\n\n def forward(self, input_vec):\n self.time += 1\n state = (np.dot(self.U, input_vec) + np.dot(self.W, self.state_list[-1]))\n element_wise_op(state, self.activator.forward)\n self.state_list.append(state)\n\n def bptt(self, sensitivity_array, activator):\n self.calcu_delta(sensitivity_array, activator)\n self.calcu_grad()\n\n def calcu_delta(self, sensitivity_array, activator):\n self.delta_list = []\n for i in range(self.time):\n self.delta_list.append(np.zeros(self.state_dim, 1))\n self.delta_list.append(sensitivity_array)\n for k in range(self.time -1, 0, -1):\n self.calcu_delta_k(k, activator)\n\n def calcu_delta_k(self, k, activator):\n state = self.state_list[k+1].copy()\n element_wise_op(self.state_list[k+1], activator.backward)\n self.state_list[k] = np.dot(np.dot(self.state_list[k+1].T, self.W), np.diag(state[:, 0])).T\n\n def calcu_grad(self):\n self.grad_list = []\n for t in range(self.time + 1):\n self.grad_list.append(np.zeros((self.state_dim, self.state_dim)))\n for t in range(self.time, 0, -1):\n self.calcu_grad_t(t)\n self.grad = reduce(lambda a, b: a+b, self.grad_list, self.grad)\n\n def calcu_grad_t(self, t):\n grad = np.dot(self.delta_list[t], self.delta_list[t-1].T)\n self.grad_list[t] = grad\n def bpttupdate(self):\n self.W -= self.grad * self.learning_rate\n\n",
"step-ids": [
3,
8,
9,
10,
12
]
}
|
[
3,
8,
9,
10,
12
] |
from django.http import response
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import User
from .serializers import UserSerializer,UserCreationSerialier,UserEditionSerializer
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
class Users(APIView):
# permission_classes = [IsAuthenticated]
def get(self,request):
users = User.objects.filter(is_removed=False)
serialized_users = UserSerializer(instance=users,many=True)
return Response(serialized_users.data,status=status.HTTP_200_OK)
class UserDetail(APIView):
def get(self,request,pk):
user = User.objects.filter(pk=pk,is_removed=False).first()
if user is None:
return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)
serailized_data = UserSerializer(instance=user)
return Response(serailized_data.data,status=status.HTTP_200_OK)
class CreateUser(APIView):
def post(self,request):
serialized_data = UserCreationSerialier(data=request.data)
if serialized_data.is_valid():
data = serialized_data.validated_data
user = User.objects.filter(email=data['email'],is_removed=False).first()
if user is not None:
return Response({'error':'This email is Already Taken!','success':False},status=status.HTTP_400_BAD_REQUEST)
user = User(email=data['email'],full_name=data['full_name'])
user.set_password(data['password'])
user.save()
serialized_user = UserSerializer(instance=user)
return Response(serialized_user.data,status=status.HTTP_201_CREATED)
return Response(serialized_data.errors,status=status.HTTP_400_BAD_REQUEST)
class EditUser(APIView):
def put(self,request,pk):
user = User.objects.filter(pk=pk,is_removed=False).first()
if user is None:
return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)
serialized_user = UserEditionSerializer(data=request.data,instance=user)
if serialized_user.is_valid():
user = serialized_user.save()
return Response(UserSerializer(instance=user).data,status=status.HTTP_202_ACCEPTED)
return Response(serialized_user.errors,status=status.HTTP_400_BAD_REQUEST)
class RemoveUser(APIView):
def delete(self,request,pk):
user = User.objects.filter(pk=pk,is_removed=False).first()
if user is None:
return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)
user.is_removed = True
user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class GetUserFromToken(APIView):
permission_classes = [IsAuthenticated]
def get(self,request):
user = request.user
serialized_user = UserSerializer(instance=user)
return Response(serialized_user.data)
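
# A sketch of wiring these views up (this urls.py and its route names are
# hypothetical, not taken from the project itself):
#
#   from django.urls import path
#
#   urlpatterns = [
#       path('users/', Users.as_view()),
#       path('users/<int:pk>/', UserDetail.as_view()),
#       path('users/create/', CreateUser.as_view()),
#       path('users/<int:pk>/edit/', EditUser.as_view()),
#       path('users/<int:pk>/remove/', RemoveUser.as_view()),
#       path('users/me/', GetUserFromToken.as_view()),
#   ]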
|
flexible
|
{
"blob_id": "dff454cbde985a08b34377b80dd8e3b22f1cc13a",
"index": 3948,
"step-1": "<mask token>\n\n\nclass CreateUser(APIView):\n <mask token>\n\n\nclass EditUser(APIView):\n\n def put(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n serialized_user = UserEditionSerializer(data=request.data, instance\n =user)\n if serialized_user.is_valid():\n user = serialized_user.save()\n return Response(UserSerializer(instance=user).data, status=\n status.HTTP_202_ACCEPTED)\n return Response(serialized_user.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass RemoveUser(APIView):\n\n def delete(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n user.is_removed = True\n user.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass GetUserFromToken(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data)\n",
"step-2": "<mask token>\n\n\nclass UserDetail(APIView):\n <mask token>\n\n\nclass CreateUser(APIView):\n\n def post(self, request):\n serialized_data = UserCreationSerialier(data=request.data)\n if serialized_data.is_valid():\n data = serialized_data.validated_data\n user = User.objects.filter(email=data['email'], is_removed=False\n ).first()\n if user is not None:\n return Response({'error': 'This email is Already Taken!',\n 'success': False}, status=status.HTTP_400_BAD_REQUEST)\n user = User(email=data['email'], full_name=data['full_name'])\n user.set_password(data['password'])\n user.save()\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data, status=status.\n HTTP_201_CREATED)\n return Response(serialized_data.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass EditUser(APIView):\n\n def put(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n serialized_user = UserEditionSerializer(data=request.data, instance\n =user)\n if serialized_user.is_valid():\n user = serialized_user.save()\n return Response(UserSerializer(instance=user).data, status=\n status.HTTP_202_ACCEPTED)\n return Response(serialized_user.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass RemoveUser(APIView):\n\n def delete(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n user.is_removed = True\n user.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass GetUserFromToken(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data)\n",
"step-3": "<mask token>\n\n\nclass UserDetail(APIView):\n\n def get(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n serailized_data = UserSerializer(instance=user)\n return Response(serailized_data.data, status=status.HTTP_200_OK)\n\n\nclass CreateUser(APIView):\n\n def post(self, request):\n serialized_data = UserCreationSerialier(data=request.data)\n if serialized_data.is_valid():\n data = serialized_data.validated_data\n user = User.objects.filter(email=data['email'], is_removed=False\n ).first()\n if user is not None:\n return Response({'error': 'This email is Already Taken!',\n 'success': False}, status=status.HTTP_400_BAD_REQUEST)\n user = User(email=data['email'], full_name=data['full_name'])\n user.set_password(data['password'])\n user.save()\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data, status=status.\n HTTP_201_CREATED)\n return Response(serialized_data.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass EditUser(APIView):\n\n def put(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n serialized_user = UserEditionSerializer(data=request.data, instance\n =user)\n if serialized_user.is_valid():\n user = serialized_user.save()\n return Response(UserSerializer(instance=user).data, status=\n status.HTTP_202_ACCEPTED)\n return Response(serialized_user.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass RemoveUser(APIView):\n\n def delete(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n user.is_removed = True\n user.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass GetUserFromToken(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data)\n",
"step-4": "<mask token>\n\n\nclass Users(APIView):\n\n def get(self, request):\n users = User.objects.filter(is_removed=False)\n serialized_users = UserSerializer(instance=users, many=True)\n return Response(serialized_users.data, status=status.HTTP_200_OK)\n\n\nclass UserDetail(APIView):\n\n def get(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n serailized_data = UserSerializer(instance=user)\n return Response(serailized_data.data, status=status.HTTP_200_OK)\n\n\nclass CreateUser(APIView):\n\n def post(self, request):\n serialized_data = UserCreationSerialier(data=request.data)\n if serialized_data.is_valid():\n data = serialized_data.validated_data\n user = User.objects.filter(email=data['email'], is_removed=False\n ).first()\n if user is not None:\n return Response({'error': 'This email is Already Taken!',\n 'success': False}, status=status.HTTP_400_BAD_REQUEST)\n user = User(email=data['email'], full_name=data['full_name'])\n user.set_password(data['password'])\n user.save()\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data, status=status.\n HTTP_201_CREATED)\n return Response(serialized_data.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass EditUser(APIView):\n\n def put(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n serialized_user = UserEditionSerializer(data=request.data, instance\n =user)\n if serialized_user.is_valid():\n user = serialized_user.save()\n return Response(UserSerializer(instance=user).data, status=\n status.HTTP_202_ACCEPTED)\n return Response(serialized_user.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass RemoveUser(APIView):\n\n def delete(self, request, pk):\n user = User.objects.filter(pk=pk, is_removed=False).first()\n if user is None:\n return Response({'error': 'User Does Not Exists', 'success': \n False}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n user.is_removed = True\n user.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass GetUserFromToken(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data)\n",
"step-5": "from django.http import response\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom .models import User\nfrom .serializers import UserSerializer,UserCreationSerialier,UserEditionSerializer\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\n\nclass Users(APIView):\n # permission_classes = [IsAuthenticated]\n\n def get(self,request):\n users = User.objects.filter(is_removed=False)\n serialized_users = UserSerializer(instance=users,many=True)\n return Response(serialized_users.data,status=status.HTTP_200_OK)\n \n\nclass UserDetail(APIView):\n\n def get(self,request,pk):\n user = User.objects.filter(pk=pk,is_removed=False).first()\n if user is None:\n return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n serailized_data = UserSerializer(instance=user)\n return Response(serailized_data.data,status=status.HTTP_200_OK)\n\n\nclass CreateUser(APIView):\n \n def post(self,request):\n serialized_data = UserCreationSerialier(data=request.data)\n if serialized_data.is_valid():\n data = serialized_data.validated_data\n user = User.objects.filter(email=data['email'],is_removed=False).first()\n if user is not None:\n return Response({'error':'This email is Already Taken!','success':False},status=status.HTTP_400_BAD_REQUEST)\n \n user = User(email=data['email'],full_name=data['full_name'])\n user.set_password(data['password'])\n user.save()\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data,status=status.HTTP_201_CREATED)\n return Response(serialized_data.errors,status=status.HTTP_400_BAD_REQUEST)\n\n\nclass EditUser(APIView):\n \n def put(self,request,pk):\n user = User.objects.filter(pk=pk,is_removed=False).first()\n if user is None:\n return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n serialized_user = UserEditionSerializer(data=request.data,instance=user)\n if serialized_user.is_valid():\n user = serialized_user.save()\n return Response(UserSerializer(instance=user).data,status=status.HTTP_202_ACCEPTED)\n return Response(serialized_user.errors,status=status.HTTP_400_BAD_REQUEST)\n\n\nclass RemoveUser(APIView):\n\n def delete(self,request,pk):\n user = User.objects.filter(pk=pk,is_removed=False).first()\n if user is None:\n return Response({'error':'User Does Not Exists','success':False},status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n user.is_removed = True\n user.save()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n\nclass GetUserFromToken(APIView):\n permission_classes = [IsAuthenticated]\n \n def get(self,request):\n user = request.user\n serialized_user = UserSerializer(instance=user)\n return Response(serialized_user.data)",
"step-ids": [
8,
10,
11,
13,
15
]
}
|
[
8,
10,
11,
13,
15
] |
from speaker_verification import *
import numpy as np
region = 'westus'
api_key = load_json('./real_secrets.json')['api_key']
wav_path = './enrollment.wav'
temp_path = './temp.wav'
# If you want to list users by profile_id
print('All users are: ', list_users(api_key, region))
# This is handled by the development / production code, but if you want to test the identification...
profile_id = create_profile(api_key, region)
enroll_user(api_key, region, wav_path, profile_id)
print(f'Likelihood that {wav_path} came from this subject')
identify_user(api_key, region, wav_path, profile_id)
print(f'Likelihood that {wav_path} came from this subject or another (randomly chosen)')
identify_user(api_key, region, wav_path, profile_ids=[profile_id, np.random.choice(list_users(api_key, region))])
print('Removing this profile id...')
remove_user(api_key, region, profile_id)
|
normal
|
{
"blob_id": "5195dcf262c0be08f83cf66e79d48e51811a67a0",
"index": 6866,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('All users are: ', list_users(api_key, region))\n<mask token>\nenroll_user(api_key, region, wav_path, profile_id)\nprint(f'Likelihood that {wav_path} came from this subject')\nidentify_user(api_key, region, wav_path, profile_id)\nprint(\n f'Likelihood that {wav_path} came from this subject or another (randomly chosen)'\n )\nidentify_user(api_key, region, wav_path, profile_ids=[profile_id, np.random\n .choice(list_users(api_key, region))])\nprint('Removing this profile id...')\nremove_user(api_key, region, profile_id)\n",
"step-3": "<mask token>\nregion = 'westus'\napi_key = load_json('./real_secrets.json')['api_key']\nwav_path = './enrollment.wav'\ntemp_path = './temp.wav'\nprint('All users are: ', list_users(api_key, region))\nprofile_id = create_profile(api_key, region)\nenroll_user(api_key, region, wav_path, profile_id)\nprint(f'Likelihood that {wav_path} came from this subject')\nidentify_user(api_key, region, wav_path, profile_id)\nprint(\n f'Likelihood that {wav_path} came from this subject or another (randomly chosen)'\n )\nidentify_user(api_key, region, wav_path, profile_ids=[profile_id, np.random\n .choice(list_users(api_key, region))])\nprint('Removing this profile id...')\nremove_user(api_key, region, profile_id)\n",
"step-4": "from speaker_verification import *\nimport numpy as np\nregion = 'westus'\napi_key = load_json('./real_secrets.json')['api_key']\nwav_path = './enrollment.wav'\ntemp_path = './temp.wav'\nprint('All users are: ', list_users(api_key, region))\nprofile_id = create_profile(api_key, region)\nenroll_user(api_key, region, wav_path, profile_id)\nprint(f'Likelihood that {wav_path} came from this subject')\nidentify_user(api_key, region, wav_path, profile_id)\nprint(\n f'Likelihood that {wav_path} came from this subject or another (randomly chosen)'\n )\nidentify_user(api_key, region, wav_path, profile_ids=[profile_id, np.random\n .choice(list_users(api_key, region))])\nprint('Removing this profile id...')\nremove_user(api_key, region, profile_id)\n",
"step-5": "from speaker_verification import *\nimport numpy as np\n\nregion = 'westus'\napi_key = load_json('./real_secrets.json')['api_key']\nwav_path = './enrollment.wav'\ntemp_path = './temp.wav'\n\n# If you want to list users by profile_id\nprint('All users are: ', list_users(api_key, region))\n\n# This is handled by the development / production code, but if you want to test the identification...\nprofile_id = create_profile(api_key, region)\nenroll_user(api_key, region, wav_path, profile_id)\n\nprint(f'Likelihood that {wav_path} came from this subject')\nidentify_user(api_key, region, wav_path, profile_id)\n\nprint(f'Likelihood that {wav_path} came from this subject or another (randomly chosen)')\nidentify_user(api_key, region, wav_path, profile_ids=[profile_id, np.random.choice(list_users(api_key, region))])\n\nprint('Removing this profile id...')\nremove_user(api_key, region, profile_id)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Only executable files go in this directory.
#
# from ..src import package
# data_dict = package.pack()

# from ..src.plugins import *  # interpreted once, everything loaded into memory
# from ..src import plugins    # import this folder (package/module/class library); its __init__ is loaded into memory by default
#
#
# plugins.pack()

# Note: because of the relative import below, this file cannot be executed
# directly as a script; run it as a module from the project root, e.g.
# `python -m <package>.bin.<this_module>` (names here are placeholders).
from ..src.script import run

if __name__ == '__main__':
    run()
|
normal
|
{
"blob_id": "4f870e0d86d9f9b8c620115a618ea32abc24c52d",
"index": 3008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n run()\n",
"step-3": "from ..src.script import run\nif __name__ == '__main__':\n run()\n",
"step-4": "# 只放置可执行文件\n#\n# from ..src import package\n# data_dict = package.pack()\n\n# from ..src.plugins import * #解释一遍全放入内存\n# from ..src import plugins #导入这个文件夹(包,模块,类库),默认加载init文件到内存\n#\n#\n# plugins.pack()\n\n\nfrom ..src.script import run\n\nif __name__ == '__main__':\n run()\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
import threading

# Classic lock-ordering deadlock demo: task1 takes A then B while task2
# takes B then A, so once each holds its first lock neither can proceed
# and the join() calls below never return.
lock_a = threading.Lock()
lock_b = threading.Lock()
def task1():
print('Task 1 is starting...')
print('Task 1 is waiting to acquire Lock A')
with lock_a:
print('Task 1 has acquired Lock A')
print('Task 1 is doing some calculations')
time.sleep(2)
print('Task 1 is waiting to acquire Lock B')
with lock_b:
print('Task 1 has acquired Lock B')
print('Task 1 is doing some calculations')
time.sleep(2)
print('Task 1 is releasing both locks')
def task2():
print('Task 2 is starting...')
print('Task 2 is waiting to acquire Lock B')
with lock_b:
print('Task 2 has acquired Lock B')
print('Task 2 is doing some calculations')
time.sleep(5)
print('Task 2 is waiting to acquire Lock A')
with lock_a:
print('Task 2 has acquired Lock A')
print('Task 2 is doing some calculations')
time.sleep(5)
print('Task 2 is releasing both locks')
if __name__ == '__main__':
t1 = threading.Thread(target=task1)
t2 = threading.Thread(target=task2)
t1.start()
t2.start()
t1.join()
t2.join()
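
# A sketch of the standard remedy (not part of the demo above): make both
# tasks acquire the locks in one global order, so no circular wait can form.
#
#   def task2_fixed():
#       with lock_a:          # same order as task1
#           with lock_b:
#               ...           # do the work that needed both locks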
|
normal
|
{
"blob_id": "c7d8a67587a6ca01c23ed922faabbaca8bbaf337",
"index": 6307,
"step-1": "<mask token>\n\n\ndef task1():\n print('Task 1 is starting...')\n print('Task 1 is waiting to acquire Lock A')\n with lock_a:\n print('Task 1 has acquired Lock A')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is waiting to acquire Lock B')\n with lock_b:\n print('Task 1 has acquired Lock B')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is releasing both locks')\n\n\ndef task2():\n print('Task 2 is starting...')\n print('Task 2 is waiting to acquire Lock B')\n with lock_b:\n print('Task 2 has acquired Lock B')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is waiting to acquire Lock A')\n with lock_a:\n print('Task 2 has acquired Lock A')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is releasing both locks')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef task1():\n print('Task 1 is starting...')\n print('Task 1 is waiting to acquire Lock A')\n with lock_a:\n print('Task 1 has acquired Lock A')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is waiting to acquire Lock B')\n with lock_b:\n print('Task 1 has acquired Lock B')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is releasing both locks')\n\n\ndef task2():\n print('Task 2 is starting...')\n print('Task 2 is waiting to acquire Lock B')\n with lock_b:\n print('Task 2 has acquired Lock B')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is waiting to acquire Lock A')\n with lock_a:\n print('Task 2 has acquired Lock A')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is releasing both locks')\n\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=task1)\n t2 = threading.Thread(target=task2)\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n",
"step-3": "<mask token>\nlock_a = threading.Lock()\nlock_b = threading.Lock()\n\n\ndef task1():\n print('Task 1 is starting...')\n print('Task 1 is waiting to acquire Lock A')\n with lock_a:\n print('Task 1 has acquired Lock A')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is waiting to acquire Lock B')\n with lock_b:\n print('Task 1 has acquired Lock B')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is releasing both locks')\n\n\ndef task2():\n print('Task 2 is starting...')\n print('Task 2 is waiting to acquire Lock B')\n with lock_b:\n print('Task 2 has acquired Lock B')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is waiting to acquire Lock A')\n with lock_a:\n print('Task 2 has acquired Lock A')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is releasing both locks')\n\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=task1)\n t2 = threading.Thread(target=task2)\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n",
"step-4": "import time\nimport threading\nlock_a = threading.Lock()\nlock_b = threading.Lock()\n\n\ndef task1():\n print('Task 1 is starting...')\n print('Task 1 is waiting to acquire Lock A')\n with lock_a:\n print('Task 1 has acquired Lock A')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is waiting to acquire Lock B')\n with lock_b:\n print('Task 1 has acquired Lock B')\n print('Task 1 is doing some calculations')\n time.sleep(2)\n print('Task 1 is releasing both locks')\n\n\ndef task2():\n print('Task 2 is starting...')\n print('Task 2 is waiting to acquire Lock B')\n with lock_b:\n print('Task 2 has acquired Lock B')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is waiting to acquire Lock A')\n with lock_a:\n print('Task 2 has acquired Lock A')\n print('Task 2 is doing some calculations')\n time.sleep(5)\n print('Task 2 is releasing both locks')\n\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=task1)\n t2 = threading.Thread(target=task2)\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
import cv2
import numpy as np
import time
import itertools
from unionfind import UnionFind
R = 512
C = 512
# Setup window
cv2.namedWindow('main')
#img_i = np.zeros((R, C), np.uint8)
img_i = cv2.imread("window1.png", cv2.IMREAD_GRAYSCALE)
#img_i = cv2.threshold(img_i, 127, 255, cv2.THRESH_BINARY)[1]
down = False
last_pos = (0,0)
last_time = time.time()
def wtf(img):
"""
Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html
:param img:
:return: thinned image
"""
thinned = np.zeros(img.shape, np.uint8)
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
iteration = 0
file_prefix = "./images/" + time.strftime("wtf_%Y-%m-%d_%H-%M-%S_")
joined = np.zeros((img.shape[0], img.shape[1]*2), np.uint8)
joined[:img.shape[0], 0:img.shape[1]] = img
joined[:img.shape[0], img.shape[1]:img.shape[1]*2] = thinned
cv2.imwrite(file_prefix + str(iteration) + ".png", joined)
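    # Classic morphological skeleton: each pass keeps the pixels removed by
    # opening (erode then dilate) and shrinks the image until it is empty.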
while True:
eroded = cv2.erode(img, element)
temp = cv2.dilate(eroded, element)
temp = cv2.subtract(img, temp)
thinned = cv2.bitwise_or(thinned, temp)
img = eroded.copy()
iteration += 1
joined[:img.shape[0], 0:img.shape[1]] = img
joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned
cv2.imwrite(file_prefix + str(iteration) + ".png", joined)
if cv2.countNonZero(img) == 0:
break
return thinned
def neighbours8(bounds, pos, repeat_first_last=False):
# nhood8 = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]
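    # Yields the 8-connected neighbours clockwise starting from the pixel
    # directly above, skipping positions outside the image. With
    # repeat_first_last the first neighbour is yielded again at the end so
    # callers can count transitions around the closed ring.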
rows, cols = bounds
r, c = pos
cup = r > 0
crh = c < cols - 1
cdn = r < rows - 1
clf = c > 0
    if cup:
        yield (r - 1, c)
    if cup and crh:
        yield (r - 1, c + 1)
    if crh:
        yield (r, c + 1)
    if cdn and crh:
        yield (r + 1, c + 1)
    if cdn:
        yield (r + 1, c)
    if cdn and clf:
        yield (r + 1, c - 1)
    if clf:
        yield (r, c - 1)
    if cup and clf:
        yield (r - 1, c - 1)
if repeat_first_last and cup:
yield (r - 1, c)
def neighbour_transitions_to_white(img, pos):
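    # A(P) from the thinning literature: the number of 0 -> non-zero
    # transitions met while walking once around the 8 neighbours of pos.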
last_value = None
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):
r, c = neighbour
if last_value is None:
last_value = img[r][c]
continue
count += last_value == 0 and img[r][c] != 0
last_value = img[r][c]
return count
def black_neighbours(img, pos):
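    # B(P): how many of the 8 neighbours are 0 (foreground, since the
    # thinning passes work on an inverted binary image).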
count = 0
for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):
r, c = neighbour
count += img[r][c] == 0
return count
def hilditch(img):
"""
Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html
:param img:
:return: thinned image
"""
rows, cols = (img.shape[0], img.shape[1])
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
temp = np.copy(img)
    # Repeat the thinning pass till no changes
changed = True
iteration = 0
file_prefix = "./images/" + time.strftime("hilditch_%Y-%m-%d_%H-%M-%S_")
cv2.imwrite(file_prefix + str(iteration) + ".png", img)
while changed:
changed = False
# Step 1
# for each pixel that has 8 neighbours
for r in range(1, rows - 1):
for c in range(1, cols - 1):
# and is black
if img[r][c] != 0:
continue
# and 2 <= B(Pixel) <= 6
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
# and A(Pixel) = 1
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
# and P2||P4||P8||A(P2)!=1
if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r][c-1] == 0 and neighbour_transitions_to_white(img, (r - 1, c)) == 1:
continue
# and P2||P4||P6||A(P4)!=1
if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r+1][c-1] == 0 and neighbour_transitions_to_white(img, (r, c+1)) == 1:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + ".png", img)
return img
def zhangsuen(img):
"""
Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm
:param img:
:return: thinned image
"""
rows, cols = (img.shape[0], img.shape[1])
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
temp = np.copy(img)
# Repeat these two steps till no changes
changed = True
iteration = 0
file_prefix = "./images/" + time.strftime("zhangsuen_%Y-%m-%d_%H-%M-%S_")
cv2.imwrite(file_prefix + str(iteration) + ".png", img)
while changed:
changed = False
# Step 1
# for each pixel that has 8 neighbours
for r in range(1, rows - 1):
for c in range(1, cols - 1):
# and is black
if img[r][c] != 0:
continue
# and 2 <= B(Pixel) <= 6
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
# and A(Pixel) = 1
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
# and P2||P4||P6
if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r+1][c] == 0:
continue
# and P4||P6||P8
if img[r][c+1] == 0 and img[r+1][c] == 0 and img[r][c-1] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
# Step 2
# for each pixel that has 8 neighbours
for r in range(1, rows - 1):
for c in range(1, cols - 1):
# and is black
if img[r][c] != 0:
continue
# and 2 <= B(Pixel) <= 6
B = black_neighbours(img, (r, c))
if B < 2 or B > 6:
continue
# and A(Pixel) = 1
A = neighbour_transitions_to_white(img, (r, c))
if A != 1:
continue
                # and P2||P4||P8 (step 2: P8 replaces step 1's P6)
if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r][c-1] == 0:
continue
                # and P2||P6||P8 (step 2: P2 replaces step 1's P4)
if img[r-1][c] == 0 and img[r+1][c] == 0 and img[r][c-1] == 0:
continue
changed = True
temp[r][c] = 255
img = np.copy(temp)
iteration += 1
cv2.imwrite(file_prefix + str(iteration) + ".png", img)
return img
class BFCell:
"""Brushfire Cell"""
def __init__(self, r, c, id, occupied):
"""BFCell(row, col)"""
self.r = r
self.c = c
self.id = id
self.occupied = occupied
def __repr__(self):
return str(self)
def __str__(self):
#return "(%d, %d)" % (self.r, self.c)
return "(%d)" % (self.id)
class BFCounter:
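    """Hands out consecutive integer ids, used here as colour labels."""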
def __init__(self):
self.count = 0
def i(self):
orig = self.count
self.count += 1
return orig
def brushfire(img):
"""
:param img:
:return: Output Image
"""
WALL = 255
SPACE = 255 - WALL
colours = BFCounter()
VORONOI = colours.i()
LEFT = colours.i()
RIGHT = colours.i()
UP = colours.i()
DOWN = colours.i()
CV = BFCell(-1, -1, -1, False) # Voronoi
CL = BFCell(-1, -1, -2, True) # Left
CR = BFCell(-1, -1, -3, True) # Right
CU = BFCell(-1, -1, -4, True) # Up
CD = BFCell(-1, -1, -5, True) # Down
rows, cols = (img.shape[0], img.shape[1])
ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
regions = UnionFind()
    # BFCell requires an occupancy flag; assumption: a cell counts as
    # occupied where the thresholded image equals WALL.
    cells = [[BFCell(r, c, r * cols + c, img[r][c] == WALL) for c in range(cols)] for r in range(rows)]
cellsf = [cell for row in cells for cell in row]
regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))
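    # The union-find now holds one region per pixel plus sentinel regions
    # for the four image borders and the Voronoi ridge.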
visited = set()
# Add the border cells to a set
for r in range(rows):
pass
return img
process = False
def mouse_callback(event, x, y, flags, param):
global img_i, down, last_pos, last_time, process
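    # Right button: trigger a thinning pass; left button drag: paint strokes.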
if event == cv2.EVENT_RBUTTONDOWN:
#img_i = np.zeros((R, C), np.uint8)
process = True
elif event == cv2.EVENT_LBUTTONDOWN:
down = True
last_pos = (x, y)
elif event == cv2.EVENT_LBUTTONUP:
down = False
last_pos = (x, y)
elif event == cv2.EVENT_MOUSEMOVE:
if down:
cv2.line(img_i, last_pos, (x, y), 255, 5)
last_pos = (x, y)
last_time = time.time()
cv2.setMouseCallback("main", mouse_callback)
edges = []
img_o = np.copy(img_i)
# iterr = None
while True:
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
# if (time.time() - last_time) > 1:
# last_time = time.time()
# del edges[:]
if process:
process = False
#img_o = hilditch(img_i)
img_o = zhangsuen(img_i)
#img_o = brushfire(img_i)
# iterr = zhangsuen(img_i)
# for edge in edges:
# cv2.line(img_o, edge[0], edge[1], 127, 1)
# if iterr is not None:
# try:
# img_o = iterr.next()
# except:
# iterr = None
combined = np.zeros((img_i.shape[0], img_i.shape[1]*2), np.uint8)
combined[:img_i.shape[0], :img_i.shape[1]] = img_i
combined[:img_i.shape[0], img_i.shape[1]:img_i.shape[1]*2] = img_o
cv2.imshow("main", combined)
|
normal
|
{
"blob_id": "86d3e90493ed04bbe23792716f46a68948911dc3",
"index": 6861,
"step-1": "<mask token>\n\n\nclass BFCell:\n <mask token>\n\n def __init__(self, r, c, id, occupied):\n \"\"\"BFCell(row, col)\"\"\"\n self.r = r\n self.c = c\n self.id = id\n self.occupied = occupied\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return '(%d)' % self.id\n\n\nclass BFCounter:\n\n def __init__(self):\n self.count = 0\n\n def i(self):\n orig = self.count\n self.count += 1\n return orig\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef black_neighbours(img, pos):\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):\n r, c = neighbour\n count += img[r][c] == 0\n return count\n\n\ndef hilditch(img):\n \"\"\"\n Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0 and neighbour_transitions_to_white(img, (r - 1, c)\n ) == 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][\n c - 1] == 0 and neighbour_transitions_to_white(img, (r,\n c + 1)) == 1:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\ndef zhangsuen(img):\n \"\"\"\n Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][c\n ] == 0:\n continue\n if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0:\n continue\n if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\nclass BFCell:\n \"\"\"Brushfire Cell\"\"\"\n\n def __init__(self, r, c, id, occupied):\n \"\"\"BFCell(row, col)\"\"\"\n self.r = r\n self.c = c\n self.id = id\n self.occupied = occupied\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return '(%d)' % self.id\n\n\nclass BFCounter:\n\n def __init__(self):\n self.count = 0\n\n def i(self):\n orig = self.count\n self.count += 1\n return orig\n\n\ndef brushfire(img):\n \"\"\"\n :param img:\n :return: Output Image\n \"\"\"\n WALL = 255\n SPACE = 255 - WALL\n colours = BFCounter()\n VORONOI = colours.i()\n LEFT = colours.i()\n RIGHT = colours.i()\n UP = colours.i()\n DOWN = colours.i()\n CV = BFCell(-1, -1, -1, 
False)\n CL = BFCell(-1, -1, -2, True)\n CR = BFCell(-1, -1, -3, True)\n CU = BFCell(-1, -1, -4, True)\n CD = BFCell(-1, -1, -5, True)\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n regions = UnionFind()\n cells = [[BFCell(r, c, r * cols + c) for c in range(cols)] for r in\n range(rows)]\n cellsf = [cell for row in cells for cell in row]\n regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))\n visited = set()\n for r in range(rows):\n pass\n return img\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef wtf(img):\n \"\"\"\n Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html\n :param img:\n :return: thinned image\n \"\"\"\n thinned = np.zeros(img.shape, np.uint8)\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n iteration = 0\n file_prefix = './images/' + time.strftime('wtf_%Y-%m-%d_%H-%M-%S_')\n joined = np.zeros((img.shape[0], img.shape[1] * 2), np.uint8)\n joined[:img.shape[0], 0:img.shape[1]] = img\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\n cv2.imwrite(file_prefix + str(iteration) + '.png', joined)\n while True:\n eroded = cv2.erode(img, element)\n temp = cv2.dilate(eroded, element)\n temp = cv2.subtract(img, temp)\n thinned = cv2.bitwise_or(thinned, temp)\n img = eroded.copy()\n iteration += 1\n joined[:img.shape[0], 0:img.shape[1]] = img\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\n cv2.imwrite(file_prefix + str(iteration) + '.png', joined)\n if cv2.countNonZero(img) == 0:\n break\n return thinned\n\n\n<mask token>\n\n\ndef neighbour_transitions_to_white(img, pos):\n last_value = None\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):\n r, c = neighbour\n if last_value is None:\n last_value = img[r][c]\n continue\n count += last_value == 0 and img[r][c] != 0\n last_value = img[r][c]\n return count\n\n\ndef black_neighbours(img, pos):\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):\n r, c = neighbour\n count += img[r][c] == 0\n return count\n\n\ndef hilditch(img):\n \"\"\"\n Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0 and neighbour_transitions_to_white(img, (r - 1, c)\n ) == 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][\n c - 1] == 0 and neighbour_transitions_to_white(img, (r,\n c + 1)) == 1:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\ndef zhangsuen(img):\n \"\"\"\n Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 
1] == 0 and img[r + 1][c\n ] == 0:\n continue\n if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0:\n continue\n if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\nclass BFCell:\n \"\"\"Brushfire Cell\"\"\"\n\n def __init__(self, r, c, id, occupied):\n \"\"\"BFCell(row, col)\"\"\"\n self.r = r\n self.c = c\n self.id = id\n self.occupied = occupied\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return '(%d)' % self.id\n\n\nclass BFCounter:\n\n def __init__(self):\n self.count = 0\n\n def i(self):\n orig = self.count\n self.count += 1\n return orig\n\n\ndef brushfire(img):\n \"\"\"\n :param img:\n :return: Output Image\n \"\"\"\n WALL = 255\n SPACE = 255 - WALL\n colours = BFCounter()\n VORONOI = colours.i()\n LEFT = colours.i()\n RIGHT = colours.i()\n UP = colours.i()\n DOWN = colours.i()\n CV = BFCell(-1, -1, -1, False)\n CL = BFCell(-1, -1, -2, True)\n CR = BFCell(-1, -1, -3, True)\n CU = BFCell(-1, -1, -4, True)\n CD = BFCell(-1, -1, -5, True)\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n regions = UnionFind()\n cells = [[BFCell(r, c, r * cols + c) for c in range(cols)] for r in\n range(rows)]\n cellsf = [cell for row in cells for cell in row]\n regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))\n visited = set()\n for r in range(rows):\n pass\n return img\n\n\n<mask token>\n\n\ndef mouse_callback(event, x, y, flags, param):\n global img_i, down, last_pos, last_time, process\n if event == cv2.EVENT_RBUTTONDOWN:\n process = True\n elif event == cv2.EVENT_LBUTTONDOWN:\n down = True\n last_pos = x, y\n elif event == cv2.EVENT_LBUTTONUP:\n down = False\n last_pos = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if down:\n cv2.line(img_i, last_pos, (x, y), 255, 5)\n last_pos = x, y\n last_time = time.time()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef wtf(img):\n \"\"\"\n Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html\n :param img:\n :return: thinned image\n \"\"\"\n thinned = np.zeros(img.shape, np.uint8)\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\n element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n iteration = 0\n file_prefix = './images/' + time.strftime('wtf_%Y-%m-%d_%H-%M-%S_')\n joined = np.zeros((img.shape[0], img.shape[1] * 2), np.uint8)\n joined[:img.shape[0], 0:img.shape[1]] = img\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\n cv2.imwrite(file_prefix + str(iteration) + '.png', joined)\n while True:\n eroded = cv2.erode(img, element)\n temp = cv2.dilate(eroded, element)\n temp = cv2.subtract(img, temp)\n thinned = cv2.bitwise_or(thinned, temp)\n img = eroded.copy()\n iteration += 1\n joined[:img.shape[0], 0:img.shape[1]] = img\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\n cv2.imwrite(file_prefix + str(iteration) + '.png', joined)\n if cv2.countNonZero(img) == 0:\n break\n return thinned\n\n\ndef neighbours8(bounds, pos, repeat_first_last=False):\n rows, cols = bounds\n r, c = pos\n cup = r > 0\n crh = c < cols - 1\n cdn = r < rows - 1\n clf = c > 0\n if cup:\n yield r - 1, c\n if crh:\n yield r - 1, c + 1\n if crh:\n yield r, c + 1\n if cdn:\n yield r + 1, c + 1\n if cdn:\n yield r + 1, c\n if clf:\n yield r + 1, c - 1\n if clf:\n yield r, c - 1\n if cup:\n yield r - 1, c - 1\n if repeat_first_last and cup:\n yield r - 1, c\n\n\ndef neighbour_transitions_to_white(img, pos):\n last_value = None\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):\n r, c = neighbour\n if last_value is None:\n last_value = img[r][c]\n continue\n count += last_value == 0 and img[r][c] != 0\n last_value = img[r][c]\n return count\n\n\ndef black_neighbours(img, pos):\n count = 0\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):\n r, c = neighbour\n count += img[r][c] == 0\n return count\n\n\ndef hilditch(img):\n \"\"\"\n Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = './images/' + time.strftime('hilditch_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0 and neighbour_transitions_to_white(img, (r - 1, c)\n ) == 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][\n c - 1] == 0 and neighbour_transitions_to_white(img, (r,\n c + 1)) == 1:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\ndef zhangsuen(img):\n \"\"\"\n Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm\n :param img:\n :return: thinned image\n \"\"\"\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n temp = np.copy(img)\n changed = True\n iteration = 0\n file_prefix = 
'./images/' + time.strftime('zhangsuen_%Y-%m-%d_%H-%M-%S_')\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n while changed:\n changed = False\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r + 1][c\n ] == 0:\n continue\n if img[r][c + 1] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n for r in range(1, rows - 1):\n for c in range(1, cols - 1):\n if img[r][c] != 0:\n continue\n B = black_neighbours(img, (r, c))\n if B < 2 or B > 6:\n continue\n A = neighbour_transitions_to_white(img, (r, c))\n if A != 1:\n continue\n if img[r - 1][c] == 0 and img[r][c + 1] == 0 and img[r][c - 1\n ] == 0:\n continue\n if img[r - 1][c] == 0 and img[r + 1][c] == 0 and img[r][c - 1\n ] == 0:\n continue\n changed = True\n temp[r][c] = 255\n img = np.copy(temp)\n iteration += 1\n cv2.imwrite(file_prefix + str(iteration) + '.png', img)\n return img\n\n\nclass BFCell:\n \"\"\"Brushfire Cell\"\"\"\n\n def __init__(self, r, c, id, occupied):\n \"\"\"BFCell(row, col)\"\"\"\n self.r = r\n self.c = c\n self.id = id\n self.occupied = occupied\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return '(%d)' % self.id\n\n\nclass BFCounter:\n\n def __init__(self):\n self.count = 0\n\n def i(self):\n orig = self.count\n self.count += 1\n return orig\n\n\ndef brushfire(img):\n \"\"\"\n :param img:\n :return: Output Image\n \"\"\"\n WALL = 255\n SPACE = 255 - WALL\n colours = BFCounter()\n VORONOI = colours.i()\n LEFT = colours.i()\n RIGHT = colours.i()\n UP = colours.i()\n DOWN = colours.i()\n CV = BFCell(-1, -1, -1, False)\n CL = BFCell(-1, -1, -2, True)\n CR = BFCell(-1, -1, -3, True)\n CU = BFCell(-1, -1, -4, True)\n CD = BFCell(-1, -1, -5, True)\n rows, cols = img.shape[0], img.shape[1]\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n regions = UnionFind()\n cells = [[BFCell(r, c, r * cols + c) for c in range(cols)] for r in\n range(rows)]\n cellsf = [cell for row in cells for cell in row]\n regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))\n visited = set()\n for r in range(rows):\n pass\n return img\n\n\n<mask token>\n\n\ndef mouse_callback(event, x, y, flags, param):\n global img_i, down, last_pos, last_time, process\n if event == cv2.EVENT_RBUTTONDOWN:\n process = True\n elif event == cv2.EVENT_LBUTTONDOWN:\n down = True\n last_pos = x, y\n elif event == cv2.EVENT_LBUTTONUP:\n down = False\n last_pos = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if down:\n cv2.line(img_i, last_pos, (x, y), 255, 5)\n last_pos = x, y\n last_time = time.time()\n\n\n<mask token>\n",
"step-5": "import cv2\r\nimport numpy as np\r\nimport time\r\nimport itertools\r\nfrom unionfind import UnionFind\r\n\r\nR = 512\r\nC = 512\r\n\r\n# Setup window\r\ncv2.namedWindow('main')\r\n#img_i = np.zeros((R, C), np.uint8)\r\nimg_i = cv2.imread(\"window1.png\", cv2.IMREAD_GRAYSCALE)\r\n#img_i = cv2.threshold(img_i, 127, 255, cv2.THRESH_BINARY)[1]\r\n\r\ndown = False\r\nlast_pos = (0,0)\r\nlast_time = time.time()\r\n\r\ndef wtf(img):\r\n \"\"\"\r\n Source: http://opencvpython.blogspot.com.au/2012/05/skeletonization-using-opencv-python.html\r\n :param img:\r\n :return: thinned image\r\n \"\"\"\r\n thinned = np.zeros(img.shape, np.uint8)\r\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)\r\n element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\r\n\r\n iteration = 0\r\n file_prefix = \"./images/\" + time.strftime(\"wtf_%Y-%m-%d_%H-%M-%S_\")\r\n joined = np.zeros((img.shape[0], img.shape[1]*2), np.uint8)\r\n joined[:img.shape[0], 0:img.shape[1]] = img\r\n joined[:img.shape[0], img.shape[1]:img.shape[1]*2] = thinned\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", joined)\r\n while True:\r\n eroded = cv2.erode(img, element)\r\n temp = cv2.dilate(eroded, element)\r\n temp = cv2.subtract(img, temp)\r\n thinned = cv2.bitwise_or(thinned, temp)\r\n img = eroded.copy()\r\n iteration += 1\r\n joined[:img.shape[0], 0:img.shape[1]] = img\r\n joined[:img.shape[0], img.shape[1]:img.shape[1] * 2] = thinned\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", joined)\r\n if cv2.countNonZero(img) == 0:\r\n break\r\n\r\n return thinned\r\n\r\ndef neighbours8(bounds, pos, repeat_first_last=False):\r\n # nhood8 = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]\r\n rows, cols = bounds\r\n r, c = pos\r\n cup = r > 0\r\n crh = c < cols - 1\r\n cdn = r < rows - 1\r\n clf = c > 0\r\n\r\n if cup:\r\n yield (r - 1, c)\r\n if crh:\r\n yield (r - 1, c + 1)\r\n if crh:\r\n yield (r, c + 1)\r\n if cdn:\r\n yield (r + 1, c + 1)\r\n if cdn:\r\n yield (r + 1, c)\r\n if clf:\r\n yield (r + 1, c - 1)\r\n if clf:\r\n yield (r, c - 1)\r\n if cup:\r\n yield (r - 1, c - 1)\r\n if repeat_first_last and cup:\r\n yield (r - 1, c)\r\n\r\ndef neighbour_transitions_to_white(img, pos):\r\n last_value = None\r\n count = 0\r\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos, True):\r\n r, c = neighbour\r\n if last_value is None:\r\n last_value = img[r][c]\r\n continue\r\n count += last_value == 0 and img[r][c] != 0\r\n last_value = img[r][c]\r\n return count\r\n\r\ndef black_neighbours(img, pos):\r\n count = 0\r\n for neighbour in neighbours8((img.shape[0], img.shape[1]), pos):\r\n r, c = neighbour\r\n count += img[r][c] == 0\r\n return count\r\n\r\ndef hilditch(img):\r\n \"\"\"\r\n Source: http://cgm.cs.mcgill.ca/~godfried/teaching/projects97/azar/skeleton.html\r\n :param img:\r\n :return: thinned image\r\n \"\"\"\r\n rows, cols = (img.shape[0], img.shape[1])\r\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\r\n temp = np.copy(img)\r\n\r\n # Repeat these two steps till no changes\r\n changed = True\r\n iteration = 0\r\n file_prefix = \"./images/\" + time.strftime(\"hilditch_%Y-%m-%d_%H-%M-%S_\")\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", img)\r\n while changed:\r\n changed = False\r\n # Step 1\r\n # for each pixel that has 8 neighbours\r\n for r in range(1, rows - 1):\r\n for c in range(1, cols - 1):\r\n # and is black\r\n if img[r][c] != 0:\r\n continue\r\n\r\n # and 2 <= B(Pixel) <= 6\r\n B = black_neighbours(img, (r, c))\r\n if B < 
2 or B > 6:\r\n continue\r\n\r\n # and A(Pixel) = 1\r\n A = neighbour_transitions_to_white(img, (r, c))\r\n if A != 1:\r\n continue\r\n\r\n # and P2||P4||P8||A(P2)!=1\r\n if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r][c-1] == 0 and neighbour_transitions_to_white(img, (r - 1, c)) == 1:\r\n continue\r\n\r\n # and P2||P4||P6||A(P4)!=1\r\n if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r+1][c-1] == 0 and neighbour_transitions_to_white(img, (r, c+1)) == 1:\r\n continue\r\n\r\n changed = True\r\n temp[r][c] = 255\r\n img = np.copy(temp)\r\n iteration += 1\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", img)\r\n\r\n return img\r\n\r\ndef zhangsuen(img):\r\n \"\"\"\r\n Source: http://rosettacode.org/wiki/Zhang-Suen_thinning_algorithm\r\n :param img:\r\n :return: thinned image\r\n \"\"\"\r\n rows, cols = (img.shape[0], img.shape[1])\r\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\r\n temp = np.copy(img)\r\n\r\n # Repeat these two steps till no changes\r\n changed = True\r\n iteration = 0\r\n file_prefix = \"./images/\" + time.strftime(\"zhangsuen_%Y-%m-%d_%H-%M-%S_\")\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", img)\r\n while changed:\r\n changed = False\r\n # Step 1\r\n # for each pixel that has 8 neighbours\r\n for r in range(1, rows - 1):\r\n for c in range(1, cols - 1):\r\n # and is black\r\n if img[r][c] != 0:\r\n continue\r\n\r\n # and 2 <= B(Pixel) <= 6\r\n B = black_neighbours(img, (r, c))\r\n if B < 2 or B > 6:\r\n continue\r\n\r\n # and A(Pixel) = 1\r\n A = neighbour_transitions_to_white(img, (r, c))\r\n if A != 1:\r\n continue\r\n\r\n # and P2||P4||P6\r\n if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r+1][c] == 0:\r\n continue\r\n\r\n # and P4||P6||P8\r\n if img[r][c+1] == 0 and img[r+1][c] == 0 and img[r][c-1] == 0:\r\n continue\r\n\r\n changed = True\r\n temp[r][c] = 255\r\n img = np.copy(temp)\r\n # Step 2\r\n # for each pixel that has 8 neighbours\r\n for r in range(1, rows - 1):\r\n for c in range(1, cols - 1):\r\n # and is black\r\n if img[r][c] != 0:\r\n continue\r\n\r\n # and 2 <= B(Pixel) <= 6\r\n B = black_neighbours(img, (r, c))\r\n if B < 2 or B > 6:\r\n continue\r\n\r\n # and A(Pixel) = 1\r\n A = neighbour_transitions_to_white(img, (r, c))\r\n if A != 1:\r\n continue\r\n\r\n # and P2||P4||P8 <===\r\n if img[r-1][c] == 0 and img[r][c+1] == 0 and img[r][c-1] == 0:\r\n continue\r\n\r\n # and ===>P2||P6||P8\r\n if img[r-1][c] == 0 and img[r+1][c] == 0 and img[r][c-1] == 0:\r\n continue\r\n\r\n changed = True\r\n temp[r][c] = 255\r\n img = np.copy(temp)\r\n iteration += 1\r\n cv2.imwrite(file_prefix + str(iteration) + \".png\", img)\r\n\r\n return img\r\n\r\nclass BFCell:\r\n \"\"\"Brushfire Cell\"\"\"\r\n def __init__(self, r, c, id, occupied):\r\n \"\"\"BFCell(row, col)\"\"\"\r\n self.r = r\r\n self.c = c\r\n self.id = id\r\n self.occupied = occupied\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n def __str__(self):\r\n #return \"(%d, %d)\" % (self.r, self.c)\r\n return \"(%d)\" % (self.id)\r\n\r\nclass BFCounter:\r\n def __init__(self):\r\n self.count = 0\r\n\r\n def i(self):\r\n orig = self.count\r\n self.count += 1\r\n return orig\r\n\r\ndef brushfire(img):\r\n \"\"\"\r\n :param img:\r\n :return: Output Image\r\n \"\"\"\r\n WALL = 255\r\n SPACE = 255 - WALL\r\n\r\n colours = BFCounter()\r\n\r\n VORONOI = colours.i()\r\n LEFT = colours.i()\r\n RIGHT = colours.i()\r\n UP = colours.i()\r\n DOWN = colours.i()\r\n\r\n CV = BFCell(-1, -1, -1, False) # Voronoi\r\n CL = BFCell(-1, -1, -2, True) # Left\r\n CR = BFCell(-1, 
-1, -3, True) # Right\r\n CU = BFCell(-1, -1, -4, True) # Up\r\n CD = BFCell(-1, -1, -5, True) # Down\r\n\r\n rows, cols = (img.shape[0], img.shape[1])\r\n ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\r\n regions = UnionFind()\r\n cells = [[BFCell(r, c, r * cols + c) for c in range(cols)] for r in range(rows)]\r\n cellsf = [cell for row in cells for cell in row]\r\n regions.insert_objects(itertools.chain(cellsf, (CV, CL, CR, CU, CD)))\r\n\r\n visited = set()\r\n\r\n # Add the border cells to a set\r\n for r in range(rows):\r\n pass\r\n\r\n return img\r\n\r\nprocess = False\r\n\r\ndef mouse_callback(event, x, y, flags, param):\r\n global img_i, down, last_pos, last_time, process\r\n if event == cv2.EVENT_RBUTTONDOWN:\r\n #img_i = np.zeros((R, C), np.uint8)\r\n process = True\r\n elif event == cv2.EVENT_LBUTTONDOWN:\r\n down = True\r\n last_pos = (x, y)\r\n elif event == cv2.EVENT_LBUTTONUP:\r\n down = False\r\n last_pos = (x, y)\r\n elif event == cv2.EVENT_MOUSEMOVE:\r\n if down:\r\n cv2.line(img_i, last_pos, (x, y), 255, 5)\r\n last_pos = (x, y)\r\n last_time = time.time()\r\n\r\ncv2.setMouseCallback(\"main\", mouse_callback)\r\n\r\nedges = []\r\n\r\nimg_o = np.copy(img_i)\r\n\r\n# iterr = None\r\n\r\nwhile True:\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord('q'):\r\n break\r\n # if (time.time() - last_time) > 1:\r\n # last_time = time.time()\r\n # del edges[:]\r\n if process:\r\n process = False\r\n #img_o = hilditch(img_i)\r\n img_o = zhangsuen(img_i)\r\n #img_o = brushfire(img_i)\r\n # iterr = zhangsuen(img_i)\r\n # for edge in edges:\r\n # cv2.line(img_o, edge[0], edge[1], 127, 1)\r\n # if iterr is not None:\r\n # try:\r\n # img_o = iterr.next()\r\n # except:\r\n # iterr = None\r\n\r\n combined = np.zeros((img_i.shape[0], img_i.shape[1]*2), np.uint8)\r\n combined[:img_i.shape[0], :img_i.shape[1]] = img_i\r\n combined[:img_i.shape[0], img_i.shape[1]:img_i.shape[1]*2] = img_o\r\n cv2.imshow(\"main\", combined)\r\n",
"step-ids": [
7,
12,
15,
16,
20
]
}
|
[
7,
12,
15,
16,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
files.sort()
<|reserved_special_token_0|>
for i, fname in enumerate(files):
sample = fname.split('/')[1].split('.')[0]
if sample in duplicates:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f = pd.read_table(fname, index_col=0, squeeze=True)
if f.sum() < 1000000:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)
f = f[f > 0]
counts[biome[sample]].update(f.index)
if i % 100 == 99:
print('Done {}/{}'.format(i + 1, len(files)))
<|reserved_special_token_0|>
recounts.fillna(0, inplace=True)
<|reserved_special_token_0|>
recounts.reset_index(inplace=True)
recounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')
<|reserved_special_token_0|>
recounts.set_index('index', inplace=True)
<|reserved_special_token_0|>
recounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\t')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
files = glob('outputs.txt/*.unique.txt.gz')
files.sort()
biome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)
duplicates = set(line.strip() for line in open('cold/duplicates.txt'))
counts = defaultdict(Counter)
skipped = 0
for i, fname in enumerate(files):
sample = fname.split('/')[1].split('.')[0]
if sample in duplicates:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f = pd.read_table(fname, index_col=0, squeeze=True)
if f.sum() < 1000000:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)
f = f[f > 0]
counts[biome[sample]].update(f.index)
if i % 100 == 99:
print('Done {}/{}'.format(i + 1, len(files)))
recounts = pd.DataFrame({k: pd.Series(v) for k, v in counts.items()})
recounts.fillna(0, inplace=True)
used_total = recounts.sum(1)
recounts['all'] = used_total
recounts = recounts.astype(int)
recounts.reset_index(inplace=True)
recounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')
names = [line.strip() for line in open('cold/derived/GMGC10.headers')]
recounts.set_index('index', inplace=True)
recounts.index = recounts.index.map(names.__getitem__)
recounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\t')
<|reserved_special_token_1|>
from collections import Counter, defaultdict
import pandas as pd
from glob import glob
import subsamplex
files = glob('outputs.txt/*.unique.txt.gz')
files.sort()
biome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)
duplicates = set(line.strip() for line in open('cold/duplicates.txt'))
counts = defaultdict(Counter)
skipped = 0
for i, fname in enumerate(files):
sample = fname.split('/')[1].split('.')[0]
if sample in duplicates:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f = pd.read_table(fname, index_col=0, squeeze=True)
if f.sum() < 1000000:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)
f = f[f > 0]
counts[biome[sample]].update(f.index)
if i % 100 == 99:
print('Done {}/{}'.format(i + 1, len(files)))
recounts = pd.DataFrame({k: pd.Series(v) for k, v in counts.items()})
recounts.fillna(0, inplace=True)
used_total = recounts.sum(1)
recounts['all'] = used_total
recounts = recounts.astype(int)
recounts.reset_index(inplace=True)
recounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')
names = [line.strip() for line in open('cold/derived/GMGC10.headers')]
recounts.set_index('index', inplace=True)
recounts.index = recounts.index.map(names.__getitem__)
recounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\t')
<|reserved_special_token_1|>
from collections import Counter, defaultdict
import pandas as pd
from glob import glob
import subsamplex
files = glob('outputs.txt/*.unique.txt.gz')
files.sort()
biome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)
duplicates = set(line.strip() for line in open('cold/duplicates.txt'))
counts = defaultdict(Counter)
skipped = 0
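# Rarefy every sample to exactly 1M reads so gene prevalence counts are
# comparable; duplicate samples and samples under 1M reads are skipped.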
for i,fname in enumerate(files):
sample = fname.split('/')[1].split('.')[0]
if sample in duplicates:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f = pd.read_table(fname, index_col=0, squeeze=True)
if f.sum() < 1_000_000:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000*1000)
f = f[f>0]
counts[biome[sample]].update(f.index)
if i % 100 == 99:
print("Done {}/{}".format(i+1, len(files)))
recounts = pd.DataFrame({k:pd.Series(v) for k, v in counts.items()})
recounts.fillna(0, inplace=True)
used_total = recounts.sum(1)
recounts['all'] = used_total
recounts = recounts.astype(int)
recounts.reset_index(inplace=True)
recounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')
names = [line.strip() for line in open('cold/derived/GMGC10.headers')]
recounts.set_index('index', inplace=True)
recounts.index = recounts.index.map(names.__getitem__)
recounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\t')
|
flexible
|
{
"blob_id": "74eea67b8640a03e616bebdadba49891017b921d",
"index": 8914,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfiles.sort()\n<mask token>\nfor i, fname in enumerate(files):\n sample = fname.split('/')[1].split('.')[0]\n if sample in duplicates:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f = pd.read_table(fname, index_col=0, squeeze=True)\n if f.sum() < 1000000:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)\n f = f[f > 0]\n counts[biome[sample]].update(f.index)\n if i % 100 == 99:\n print('Done {}/{}'.format(i + 1, len(files)))\n<mask token>\nrecounts.fillna(0, inplace=True)\n<mask token>\nrecounts.reset_index(inplace=True)\nrecounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')\n<mask token>\nrecounts.set_index('index', inplace=True)\n<mask token>\nrecounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\\t')\n",
"step-3": "<mask token>\nfiles = glob('outputs.txt/*.unique.txt.gz')\nfiles.sort()\nbiome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)\nduplicates = set(line.strip() for line in open('cold/duplicates.txt'))\ncounts = defaultdict(Counter)\nskipped = 0\nfor i, fname in enumerate(files):\n sample = fname.split('/')[1].split('.')[0]\n if sample in duplicates:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f = pd.read_table(fname, index_col=0, squeeze=True)\n if f.sum() < 1000000:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)\n f = f[f > 0]\n counts[biome[sample]].update(f.index)\n if i % 100 == 99:\n print('Done {}/{}'.format(i + 1, len(files)))\nrecounts = pd.DataFrame({k: pd.Series(v) for k, v in counts.items()})\nrecounts.fillna(0, inplace=True)\nused_total = recounts.sum(1)\nrecounts['all'] = used_total\nrecounts = recounts.astype(int)\nrecounts.reset_index(inplace=True)\nrecounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')\nnames = [line.strip() for line in open('cold/derived/GMGC10.headers')]\nrecounts.set_index('index', inplace=True)\nrecounts.index = recounts.index.map(names.__getitem__)\nrecounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\\t')\n",
"step-4": "from collections import Counter, defaultdict\nimport pandas as pd\nfrom glob import glob\nimport subsamplex\nfiles = glob('outputs.txt/*.unique.txt.gz')\nfiles.sort()\nbiome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)\nduplicates = set(line.strip() for line in open('cold/duplicates.txt'))\ncounts = defaultdict(Counter)\nskipped = 0\nfor i, fname in enumerate(files):\n sample = fname.split('/')[1].split('.')[0]\n if sample in duplicates:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f = pd.read_table(fname, index_col=0, squeeze=True)\n if f.sum() < 1000000:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)\n f = f[f > 0]\n counts[biome[sample]].update(f.index)\n if i % 100 == 99:\n print('Done {}/{}'.format(i + 1, len(files)))\nrecounts = pd.DataFrame({k: pd.Series(v) for k, v in counts.items()})\nrecounts.fillna(0, inplace=True)\nused_total = recounts.sum(1)\nrecounts['all'] = used_total\nrecounts = recounts.astype(int)\nrecounts.reset_index(inplace=True)\nrecounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')\nnames = [line.strip() for line in open('cold/derived/GMGC10.headers')]\nrecounts.set_index('index', inplace=True)\nrecounts.index = recounts.index.map(names.__getitem__)\nrecounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\\t')\n",
"step-5": "from collections import Counter, defaultdict\nimport pandas as pd\nfrom glob import glob\nimport subsamplex\n\nfiles = glob('outputs.txt/*.unique.txt.gz')\nfiles.sort()\nbiome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)\nduplicates = set(line.strip() for line in open('cold/duplicates.txt'))\n\ncounts = defaultdict(Counter)\nskipped = 0\nfor i,fname in enumerate(files):\n sample = fname.split('/')[1].split('.')[0]\n if sample in duplicates:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f = pd.read_table(fname, index_col=0, squeeze=True)\n if f.sum() < 1_000_000:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000*1000)\n f = f[f>0]\n counts[biome[sample]].update(f.index)\n if i % 100 == 99:\n print(\"Done {}/{}\".format(i+1, len(files)))\n\nrecounts = pd.DataFrame({k:pd.Series(v) for k, v in counts.items()})\nrecounts.fillna(0, inplace=True)\nused_total = recounts.sum(1)\nrecounts['all'] = used_total\nrecounts = recounts.astype(int)\nrecounts.reset_index(inplace=True)\nrecounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')\n\nnames = [line.strip() for line in open('cold/derived/GMGC10.headers')]\nrecounts.set_index('index', inplace=True)\nrecounts.index = recounts.index.map(names.__getitem__)\nrecounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\\t')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.urls import path
from . import views
urlpatterns = [
path('product', views.ProductCreateAndList.as_view()),
path('product/<int:pk>', views.ProductRetrieve.as_view()),
]
|
normal
|
{
"blob_id": "d21b89285d4b4c73a08bda746cea31b5a13d1050",
"index": 1967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('product', views.ProductCreateAndList.as_view()), path(\n 'product/<int:pk>', views.ProductRetrieve.as_view())]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('product', views.ProductCreateAndList.as_view()), path(\n 'product/<int:pk>', views.ProductRetrieve.as_view())]\n",
"step-4": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('product', views.ProductCreateAndList.as_view()),\n path('product/<int:pk>', views.ProductRetrieve.as_view()),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Patient(object):
def __init__(self, id_number, name, bed_number, *allergies):
self.id_number = id_number
self.name = name
self.allergies = allergies
self.bed_number = bed_number
class Hospital(object):
def __init__(self, name, capacity):
self.patients = []
self.name = name
self.capacity = capacity
def addPatient(self, patient):
        if len(self.patients) < self.capacity:
self.patients.append(patient)
else:
print "The hospital is full."
return self
def discharge(self, patient):
for patient1 in self.patients:
if patient1.name == patient.name:
self.patients.remove(patient)
patient.bed_number = 0
return self
def displayInfo(self):
for patient in self.patients:
print "Id Number:", patient.id_number
print "Name:", patient.name
print "Bed Number:", patient.bed_number
print "Allergies:", patient.allergies
return self
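# Every Hospital method returns self, so the calls below can be chained.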
patientA = Patient(1235, "Helen Smith", 10, "peanuts", "seafood")
patientB = Patient(1594, "Robert Brown", 15, "eggs")
patientC = Patient(1587, "Amy Beard", 26, "guinea pigs", "cats")
patientD = Patient(1658, "Robin Meggs", 51, "coconut")
hospital1 = Hospital("Inova Fairfax", 2)
hospital1.addPatient(patientA).addPatient(patientB).addPatient(patientC).addPatient(patientD).discharge(patientA).displayInfo()
|
normal
|
{
"blob_id": "259a4bb39496bdfc71d60edb4994d26351c6961d",
"index": 3621,
"step-1": "class Patient(object):\r\n def __init__(self, id_number, name, bed_number, *allergies):\r\n self.id_number = id_number\r\n self.name = name\r\n self.allergies = allergies\r\n self.bed_number = bed_number\r\nclass Hospital(object):\r\n def __init__(self, name, capacity):\r\n self.patients = []\r\n self.name = name\r\n self.capacity = capacity\r\n def addPatient(self, patient):\r\n if len(self.patients) <= self.capacity:\r\n self.patients.append(patient)\r\n else:\r\n print \"The hospital is full.\"\r\n return self\r\n def discharge(self, patient):\r\n for patient1 in self.patients:\r\n if patient1.name == patient.name:\r\n self.patients.remove(patient)\r\n patient.bed_number = 0\r\n return self\r\n def displayInfo(self):\r\n for patient in self.patients:\r\n print \"Id Number:\", patient.id_number\r\n print \"Name:\", patient.name\r\n print \"Bed Number:\", patient.bed_number\r\n print \"Allergies:\", patient.allergies\r\n return self\r\n\r\npatientA = Patient(1235, \"Helen Smith\", 10, (\"peanuts\", \"seafood\"))\r\npatientB = Patient(1594, \"Robert Brown\", 15, \"eggs\")\r\npatientC = Patient(1587, \"Amy Beard\", 26, (\"guinea pigs\", \"cats\"))\r\npatientD = Patient(1658, \"Robin Meggs\", 51, \"coconut\")\r\nhospital1 = Hospital(\"Inova Fairfax\", 2)\r\nhospital1.addPatient(patientA).addPatient(patientB).addPatient(patientC).addPatient(patientD).discharge(patientA).displayInfo()\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):
genA = generator(a_fact, a_mod)
genB = generator(b_fact, b_mod)
match = 0
mask = (255 << 8) + 255
for i in range(N):
a = genA(a)
b = genB(b)
match += [0, 1][mask & a == mask & b]
return match
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def generator(factor, modulus=-1, maxx=2147483647):
def next(prev):
nxt = prev * factor % maxx
if modulus > 0:
while nxt % modulus != 0:
nxt = nxt * factor % maxx
return nxt
return next
def main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):
genA = generator(a_fact, a_mod)
genB = generator(b_fact, b_mod)
match = 0
mask = (255 << 8) + 255
for i in range(N):
a = genA(a)
b = genB(b)
match += [0, 1][mask & a == mask & b]
return match
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def generator(factor, modulus=-1, maxx=2147483647):
def next(prev):
nxt = prev * factor % maxx
if modulus > 0:
while nxt % modulus != 0:
nxt = nxt * factor % maxx
return nxt
return next
def main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):
genA = generator(a_fact, a_mod)
genB = generator(b_fact, b_mod)
match = 0
mask = (255 << 8) + 255
for i in range(N):
a = genA(a)
b = genB(b)
match += [0, 1][mask & a == mask & b]
return match
if __name__ == '__main__':
print(main(634, 301, 4, 8, 5000000))
<|reserved_special_token_1|>
def generator(factor, modulus=-1, maxx=2147483647):
def next(prev):
nxt = (prev*factor) % maxx
if modulus > 0:
while nxt % modulus != 0:
nxt = (nxt * factor) % maxx
return nxt
return next
def main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):
genA = generator(a_fact, a_mod)
genB = generator(b_fact, b_mod)
match = 0
mask = (0xFF << 8) + 0xFF
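    # a & mask == b & mask compares only the lowest 16 bits of each value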
for i in range(N):
a = genA(a)
b = genB(b)
match += [0, 1][(mask & a) == (mask & b)]
return match
if __name__ == '__main__':
#example
#print(main(65, 8921))
#print(main(65,8921,4,8,2000))
#print(main(65,8921,4,8,5000000))
#PART 1
#print(main(634,301))
#PART 2
print(main(634,301,4,8,5000000))
|
flexible
|
{
"blob_id": "6162911befc8ad37591f7c19b14b349c655ccac0",
"index": 3856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):\n genA = generator(a_fact, a_mod)\n genB = generator(b_fact, b_mod)\n match = 0\n mask = (255 << 8) + 255\n for i in range(N):\n a = genA(a)\n b = genB(b)\n match += [0, 1][mask & a == mask & b]\n return match\n\n\n<mask token>\n",
"step-3": "def generator(factor, modulus=-1, maxx=2147483647):\n\n def next(prev):\n nxt = prev * factor % maxx\n if modulus > 0:\n while nxt % modulus != 0:\n nxt = nxt * factor % maxx\n return nxt\n return next\n\n\ndef main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):\n genA = generator(a_fact, a_mod)\n genB = generator(b_fact, b_mod)\n match = 0\n mask = (255 << 8) + 255\n for i in range(N):\n a = genA(a)\n b = genB(b)\n match += [0, 1][mask & a == mask & b]\n return match\n\n\n<mask token>\n",
"step-4": "def generator(factor, modulus=-1, maxx=2147483647):\n\n def next(prev):\n nxt = prev * factor % maxx\n if modulus > 0:\n while nxt % modulus != 0:\n nxt = nxt * factor % maxx\n return nxt\n return next\n\n\ndef main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):\n genA = generator(a_fact, a_mod)\n genB = generator(b_fact, b_mod)\n match = 0\n mask = (255 << 8) + 255\n for i in range(N):\n a = genA(a)\n b = genB(b)\n match += [0, 1][mask & a == mask & b]\n return match\n\n\nif __name__ == '__main__':\n print(main(634, 301, 4, 8, 5000000))\n",
"step-5": "def generator(factor, modulus=-1, maxx=2147483647):\n def next(prev):\n nxt = (prev*factor) % maxx\n if modulus > 0:\n while nxt % modulus != 0:\n nxt = (nxt * factor) % maxx\n return nxt\n return next\n\n\ndef main(a, b, a_mod=-1, b_mod=-1, N=40000000, a_fact=16807, b_fact=48271):\n genA = generator(a_fact, a_mod)\n genB = generator(b_fact, b_mod)\n match = 0\n mask = (0xFF << 8) + 0xFF\n for i in range(N):\n a = genA(a)\n b = genB(b)\n match += [0, 1][(mask & a) == (mask & b)]\n return match\n\nif __name__ == '__main__':\n #example\n #print(main(65, 8921))\n #print(main(65,8921,4,8,2000))\n #print(main(65,8921,4,8,5000000))\n \n #PART 1\n #print(main(634,301))\n\n #PART 2\n print(main(634,301,4,8,5000000))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def index(request):
blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]
context = {'Post': blogs}
return render(request, 'blogapp/index.html', context)
def blogs(request):
return render(request, template_name='blogapp/blog.html')
def detail(request, slug):
try:
post = Post.objects.get(slug=slug)
context = {'post': post}
return render(request, 'blogapp/detail.html', context)
except ObjectDoesNotExist:
return render(request, template_name='blogapp/detail.html')
def about(request):
return render(request, template_name='blogapp/about.html')
def loginPage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password1')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('index')
else:
messages.error(request, 'Username or Password Incorrect')
return render(request, 'accounts/login.html')
else:
return render(request, 'accounts/login.html')
def logoutUser(request):
logout(request)
return redirect('login')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]
context = {'Post': blogs}
return render(request, 'blogapp/index.html', context)
def blogs(request):
return render(request, template_name='blogapp/blog.html')
def detail(request, slug):
try:
post = Post.objects.get(slug=slug)
context = {'post': post}
return render(request, 'blogapp/detail.html', context)
except ObjectDoesNotExist:
return render(request, template_name='blogapp/detail.html')
def about(request):
return render(request, template_name='blogapp/about.html')
def loginPage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password1')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('index')
else:
messages.error(request, 'Username or Password Incorrect')
return render(request, 'accounts/login.html')
else:
return render(request, 'accounts/login.html')
def logoutUser(request):
logout(request)
return redirect('login')
<|reserved_special_token_0|>
def products(request):
return render(request, template_name='mainapp/products.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]
context = {'Post': blogs}
return render(request, 'blogapp/index.html', context)
def blogs(request):
return render(request, template_name='blogapp/blog.html')
def detail(request, slug):
try:
post = Post.objects.get(slug=slug)
context = {'post': post}
return render(request, 'blogapp/detail.html', context)
except ObjectDoesNotExist:
return render(request, template_name='blogapp/detail.html')
def about(request):
return render(request, template_name='blogapp/about.html')
def loginPage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password1')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('index')
else:
messages.error(request, 'Username or Password Incorrect')
return render(request, 'accounts/login.html')
else:
return render(request, 'accounts/login.html')
def logoutUser(request):
logout(request)
return redirect('login')
def contact(request):
return render(request, template_name='blogapp/contact.html')
def products(request):
return render(request, template_name='mainapp/products.html')
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from .models import Post
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
def index(request):
blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]
context = {'Post': blogs}
return render(request, 'blogapp/index.html', context)
def blogs(request):
return render(request, template_name='blogapp/blog.html')
def detail(request, slug):
try:
post = Post.objects.get(slug=slug)
context = {'post': post}
return render(request, 'blogapp/detail.html', context)
except ObjectDoesNotExist:
return render(request, template_name='blogapp/detail.html')
def about(request):
return render(request, template_name='blogapp/about.html')
def loginPage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password1')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('index')
else:
messages.error(request, 'Username or Password Incorrect')
return render(request, 'accounts/login.html')
else:
return render(request, 'accounts/login.html')
def logoutUser(request):
logout(request)
return redirect('login')
def contact(request):
return render(request, template_name='blogapp/contact.html')
def products(request):
return render(request, template_name='mainapp/products.html')
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from .models import Post
from django.shortcuts import redirect
from django.core.exceptions import ObjectDoesNotExist
def index(request):
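    # Show the ten most recently created posts with status=1 on the home page.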
blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]
context = {'Post': blogs}
return render(request, 'blogapp/index.html', context)
def blogs(request):
return render(request, template_name='blogapp/blog.html')
def detail(request, slug):
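    # Fetch a single post by its slug; render the bare detail template if none exists.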
try:
post = Post.objects.get(slug=slug)
context = {'post': post}
return render(request, 'blogapp/detail.html', context)
except ObjectDoesNotExist:
return render(request, template_name='blogapp/detail.html')
def about(request):
return render(request, template_name='blogapp/about.html')
def loginPage(request):
# form = CreateUserForm()
# context = {'form': form}
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password1')
user = authenticate(username=username, password=password)
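        # Log valid users in and redirect to the index view; otherwise flash an error.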
if user is not None:
login(request, user)
return redirect('index')
else:
messages.error(request, 'Username or Password Incorrect')
return render(request, 'accounts/login.html',)
else:
return render(request, 'accounts/login.html',)
def logoutUser(request):
logout(request)
return redirect('login')
def contact(request):
return render(request, template_name='blogapp/contact.html')
def products(request):
return render(request, template_name='mainapp/products.html')
|
flexible
|
{
"blob_id": "aec374ffa368755350d0d75c96860f760e8524e1",
"index": 7301,
"step-1": "<mask token>\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html')\n else:\n return render(request, 'accounts/login.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html')\n else:\n return render(request, 'accounts/login.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\n<mask token>\n\n\ndef products(request):\n return render(request, template_name='mainapp/products.html')\n",
"step-3": "<mask token>\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html')\n else:\n return render(request, 'accounts/login.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\ndef contact(request):\n return render(request, template_name='blogapp/contact.html')\n\n\ndef products(request):\n return render(request, template_name='mainapp/products.html')\n",
"step-4": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import Post\nfrom django.shortcuts import redirect\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html')\n else:\n return render(request, 'accounts/login.html')\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\ndef contact(request):\n return render(request, template_name='blogapp/contact.html')\n\n\ndef products(request):\n return render(request, template_name='mainapp/products.html')\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import Post\nfrom django.shortcuts import redirect\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\ndef index(request):\n blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]\n context = {'Post': blogs}\n return render(request, 'blogapp/index.html', context)\n\ndef blogs(request):\n return render(request, template_name='blogapp/blog.html')\n\n\ndef detail(request, slug):\n try:\n post = Post.objects.get(slug=slug)\n context = {'post': post}\n return render(request, 'blogapp/detail.html', context)\n except ObjectDoesNotExist:\n return render(request, template_name='blogapp/detail.html')\n\n\ndef about(request):\n return render(request, template_name='blogapp/about.html')\n\ndef loginPage(request):\n\n # form = CreateUserForm()\n # context = {'form': form}\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password1')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('index')\n else:\n messages.error(request, 'Username or Password Incorrect')\n return render(request, 'accounts/login.html',)\n else:\n return render(request, 'accounts/login.html',)\n\n\ndef logoutUser(request):\n\n logout(request)\n return redirect('login')\n\n\n\ndef contact(request):\n return render(request, template_name='blogapp/contact.html')\n\n\ndef products(request):\n return render(request, template_name='mainapp/products.html')",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):
PlotSettings()
t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)
NumFunctions = len(phi_orthonormalized_list)
f = numpy.zeros((NumFunctions, t_array.size), dtype=float)
for j in range(NumFunctions):
f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')
f[j, :] = f_lambdify(t_array)
fig, ax = plt.subplots(figsize=(7, 4.8))
for j in range(NumFunctions):
ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +
StartFunctionIndex))
ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)
ax.set_xlim([t_array[0], t_array[-1]])
ax.set_ylim([-1, 1])
ax.set_yticks([-1, 0, 1])
ax.set_xlabel('$t$')
ax.set_ylabel('$\\phi_i^{\\perp}(t)$')
ax.set_title('Orthogonal functions')
ax.grid(axis='y')
FileDirectory = os.path.dirname(__file__)
ParentDirectory = os.path.dirname(FileDirectory)
SaveDir = os.path.join(ParentDirectory, 'doc', 'images')
if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):
SaveDir = os.getcwd()
if os.access(SaveDir, os.W_OK):
SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')
SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')
plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')
plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')
print('')
print('Plot saved to "%s".' % SaveFullname_SVG)
print('Plot saved to "%s".' % SaveFullname_PDF)
else:
print('Cannot save plot to %s. Directory is not writable.' % SaveDir)
if matplotlib.get_backend() != 'agg':
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def PlotSettings():
"""
General settings for the plot.
"""
import seaborn as sns
sns.set(font_scale=1.2)
if find_executable('latex'):
plt.rc('text', usetex=True)
matplotlib.font_manager._rebuild()
sns.set_style('white')
sns.set_style('ticks')
plt.rc('font', family='serif')
plt.rcParams['svg.fonttype'] = 'none'
def PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):
PlotSettings()
t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)
NumFunctions = len(phi_orthonormalized_list)
f = numpy.zeros((NumFunctions, t_array.size), dtype=float)
for j in range(NumFunctions):
f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')
f[j, :] = f_lambdify(t_array)
fig, ax = plt.subplots(figsize=(7, 4.8))
for j in range(NumFunctions):
ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +
StartFunctionIndex))
ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)
ax.set_xlim([t_array[0], t_array[-1]])
ax.set_ylim([-1, 1])
ax.set_yticks([-1, 0, 1])
ax.set_xlabel('$t$')
ax.set_ylabel('$\\phi_i^{\\perp}(t)$')
ax.set_title('Orthogonal functions')
ax.grid(axis='y')
FileDirectory = os.path.dirname(__file__)
ParentDirectory = os.path.dirname(FileDirectory)
SaveDir = os.path.join(ParentDirectory, 'doc', 'images')
if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):
SaveDir = os.getcwd()
if os.access(SaveDir, os.W_OK):
SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')
SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')
plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')
plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')
print('')
print('Plot saved to "%s".' % SaveFullname_SVG)
print('Plot saved to "%s".' % SaveFullname_PDF)
else:
print('Cannot save plot to %s. Directory is not writable.' % SaveDir)
if matplotlib.get_backend() != 'agg':
plt.show()
<|reserved_special_token_1|>
import os
import sympy
import numpy
from distutils.spawn import find_executable
import matplotlib
import matplotlib.pyplot as plt
from .Declarations import n, t
def PlotSettings():
"""
General settings for the plot.
"""
import seaborn as sns
sns.set(font_scale=1.2)
if find_executable('latex'):
plt.rc('text', usetex=True)
matplotlib.font_manager._rebuild()
sns.set_style('white')
sns.set_style('ticks')
plt.rc('font', family='serif')
plt.rcParams['svg.fonttype'] = 'none'
def PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):
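    # Sample each symbolic function on a log-spaced grid, plot them together on a semilog axis, and save SVG/PDF copies.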
PlotSettings()
t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)
NumFunctions = len(phi_orthonormalized_list)
f = numpy.zeros((NumFunctions, t_array.size), dtype=float)
for j in range(NumFunctions):
f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')
f[j, :] = f_lambdify(t_array)
fig, ax = plt.subplots(figsize=(7, 4.8))
for j in range(NumFunctions):
ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +
StartFunctionIndex))
ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)
ax.set_xlim([t_array[0], t_array[-1]])
ax.set_ylim([-1, 1])
ax.set_yticks([-1, 0, 1])
ax.set_xlabel('$t$')
ax.set_ylabel('$\\phi_i^{\\perp}(t)$')
ax.set_title('Orthogonal functions')
ax.grid(axis='y')
FileDirectory = os.path.dirname(__file__)
ParentDirectory = os.path.dirname(FileDirectory)
SaveDir = os.path.join(ParentDirectory, 'doc', 'images')
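    # Prefer doc/images for output; fall back to the current working directory if it is missing or unwritable.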
if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):
SaveDir = os.getcwd()
if os.access(SaveDir, os.W_OK):
SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')
SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')
plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')
plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')
print('')
print('Plot saved to "%s".' % SaveFullname_SVG)
print('Plot saved to "%s".' % SaveFullname_PDF)
else:
print('Cannot save plot to %s. Directory is not writable.' % SaveDir)
if matplotlib.get_backend() != 'agg':
plt.show()
<|reserved_special_token_1|>
# =======
# Imports
# =======
import os
import sympy
import numpy
from distutils.spawn import find_executable
import matplotlib
import matplotlib.pyplot as plt
from .Declarations import n,t
# =============
# Plot Settings
# =============
def PlotSettings():
"""
General settings for the plot.
"""
# Color palette
import seaborn as sns
# sns.set()
# Axes font size
sns.set(font_scale=1.2)
# LaTeX
if find_executable('latex'):
plt.rc('text',usetex=True)
matplotlib.font_manager._rebuild()
# Style sheet
sns.set_style("white")
sns.set_style("ticks")
# Font (Note: this should be AFTER the plt.style.use)
plt.rc('font', family='serif')
plt.rcParams['svg.fonttype'] = 'none' # text in svg file will be text not path.
# ==============
# Plot Functions
# ==============
def PlotFunctions(phi_orthonormalized_list,StartFunctionIndex,Interval):
# Run plot settings
PlotSettings()
# Axis
t_array = numpy.logspace(-7,numpy.log10(Interval[1]),1000)
# Evaluate functions
NumFunctions = len(phi_orthonormalized_list)
f = numpy.zeros((NumFunctions,t_array.size),dtype=float)
for j in range(NumFunctions):
f_lambdify = sympy.lambdify(t,phi_orthonormalized_list[j],'numpy')
f[j,:] = f_lambdify(t_array)
# Plot
fig,ax = plt.subplots(figsize=(7,4.8))
for j in range(NumFunctions):
ax.semilogx(t_array,f[j,:],label=r'$i = %d$'%(j+StartFunctionIndex))
ax.legend(ncol=3,loc='lower left',borderpad=0.5,frameon=False)
ax.set_xlim([t_array[0],t_array[-1]])
ax.set_ylim([-1,1])
ax.set_yticks([-1,0,1])
ax.set_xlabel(r'$t$')
ax.set_ylabel(r'$\phi_i^{\perp}(t)$')
ax.set_title('Orthogonal functions')
ax.grid(axis='y')
# Get the root directory of the package (parent directory of this script)
FileDirectory = os.path.dirname(__file__)
ParentDirectory = os.path.dirname(FileDirectory)
    # Try to save in the doc/images directory. Check that it exists and is writable
SaveDir = os.path.join(ParentDirectory,'doc','images')
if (not os.path.isdir(SaveDir)) or (not os.access(SaveDir,os.W_OK)):
# Write in the current working directory
SaveDir = os.getcwd()
# Save plot in both svg and pdf format
if os.access(SaveDir,os.W_OK):
SaveFullname_SVG = os.path.join(SaveDir,'OrthogonalFunctions.svg')
SaveFullname_PDF = os.path.join(SaveDir,'OrthogonalFunctions.pdf')
plt.savefig(SaveFullname_SVG,transparent=True,bbox_inches='tight')
plt.savefig(SaveFullname_PDF,transparent=True,bbox_inches='tight')
print('')
print('Plot saved to "%s".'%(SaveFullname_SVG))
print('Plot saved to "%s".'%(SaveFullname_PDF))
else:
print('Cannot save plot to %s. Directory is not writable.'%SaveDir)
# If no display backend is enabled, do not plot in the interactive mode
if matplotlib.get_backend() != 'agg':
plt.show()
|
flexible
|
{
"blob_id": "81da2aab9ca11e63dafdd4eefc340d37b326fc6f",
"index": 1846,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):\n PlotSettings()\n t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)\n NumFunctions = len(phi_orthonormalized_list)\n f = numpy.zeros((NumFunctions, t_array.size), dtype=float)\n for j in range(NumFunctions):\n f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')\n f[j, :] = f_lambdify(t_array)\n fig, ax = plt.subplots(figsize=(7, 4.8))\n for j in range(NumFunctions):\n ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +\n StartFunctionIndex))\n ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)\n ax.set_xlim([t_array[0], t_array[-1]])\n ax.set_ylim([-1, 1])\n ax.set_yticks([-1, 0, 1])\n ax.set_xlabel('$t$')\n ax.set_ylabel('$\\\\phi_i^{\\\\perp}(t)$')\n ax.set_title('Orthogonal functions')\n ax.grid(axis='y')\n FileDirectory = os.path.dirname(__file__)\n ParentDirectory = os.path.dirname(FileDirectory)\n SaveDir = os.path.join(ParentDirectory, 'doc', 'images')\n if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):\n SaveDir = os.getcwd()\n if os.access(SaveDir, os.W_OK):\n SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')\n SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')\n plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')\n plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')\n print('')\n print('Plot saved to \"%s\".' % SaveFullname_SVG)\n print('Plot saved to \"%s\".' % SaveFullname_PDF)\n else:\n print('Cannot save plot to %s. Directory is not writable.' % SaveDir)\n if matplotlib.get_backend() != 'agg':\n plt.show()\n",
"step-3": "<mask token>\n\n\ndef PlotSettings():\n \"\"\"\n General settings for the plot.\n \"\"\"\n import seaborn as sns\n sns.set(font_scale=1.2)\n if find_executable('latex'):\n plt.rc('text', usetex=True)\n matplotlib.font_manager._rebuild()\n sns.set_style('white')\n sns.set_style('ticks')\n plt.rc('font', family='serif')\n plt.rcParams['svg.fonttype'] = 'none'\n\n\ndef PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):\n PlotSettings()\n t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)\n NumFunctions = len(phi_orthonormalized_list)\n f = numpy.zeros((NumFunctions, t_array.size), dtype=float)\n for j in range(NumFunctions):\n f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')\n f[j, :] = f_lambdify(t_array)\n fig, ax = plt.subplots(figsize=(7, 4.8))\n for j in range(NumFunctions):\n ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +\n StartFunctionIndex))\n ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)\n ax.set_xlim([t_array[0], t_array[-1]])\n ax.set_ylim([-1, 1])\n ax.set_yticks([-1, 0, 1])\n ax.set_xlabel('$t$')\n ax.set_ylabel('$\\\\phi_i^{\\\\perp}(t)$')\n ax.set_title('Orthogonal functions')\n ax.grid(axis='y')\n FileDirectory = os.path.dirname(__file__)\n ParentDirectory = os.path.dirname(FileDirectory)\n SaveDir = os.path.join(ParentDirectory, 'doc', 'images')\n if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):\n SaveDir = os.getcwd()\n if os.access(SaveDir, os.W_OK):\n SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')\n SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')\n plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')\n plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')\n print('')\n print('Plot saved to \"%s\".' % SaveFullname_SVG)\n print('Plot saved to \"%s\".' % SaveFullname_PDF)\n else:\n print('Cannot save plot to %s. Directory is not writable.' % SaveDir)\n if matplotlib.get_backend() != 'agg':\n plt.show()\n",
"step-4": "import os\nimport sympy\nimport numpy\nfrom distutils.spawn import find_executable\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom .Declarations import n, t\n\n\ndef PlotSettings():\n \"\"\"\n General settings for the plot.\n \"\"\"\n import seaborn as sns\n sns.set(font_scale=1.2)\n if find_executable('latex'):\n plt.rc('text', usetex=True)\n matplotlib.font_manager._rebuild()\n sns.set_style('white')\n sns.set_style('ticks')\n plt.rc('font', family='serif')\n plt.rcParams['svg.fonttype'] = 'none'\n\n\ndef PlotFunctions(phi_orthonormalized_list, StartFunctionIndex, Interval):\n PlotSettings()\n t_array = numpy.logspace(-7, numpy.log10(Interval[1]), 1000)\n NumFunctions = len(phi_orthonormalized_list)\n f = numpy.zeros((NumFunctions, t_array.size), dtype=float)\n for j in range(NumFunctions):\n f_lambdify = sympy.lambdify(t, phi_orthonormalized_list[j], 'numpy')\n f[j, :] = f_lambdify(t_array)\n fig, ax = plt.subplots(figsize=(7, 4.8))\n for j in range(NumFunctions):\n ax.semilogx(t_array, f[j, :], label='$i = %d$' % (j +\n StartFunctionIndex))\n ax.legend(ncol=3, loc='lower left', borderpad=0.5, frameon=False)\n ax.set_xlim([t_array[0], t_array[-1]])\n ax.set_ylim([-1, 1])\n ax.set_yticks([-1, 0, 1])\n ax.set_xlabel('$t$')\n ax.set_ylabel('$\\\\phi_i^{\\\\perp}(t)$')\n ax.set_title('Orthogonal functions')\n ax.grid(axis='y')\n FileDirectory = os.path.dirname(__file__)\n ParentDirectory = os.path.dirname(FileDirectory)\n SaveDir = os.path.join(ParentDirectory, 'doc', 'images')\n if not os.path.isdir(SaveDir) or not os.access(SaveDir, os.W_OK):\n SaveDir = os.getcwd()\n if os.access(SaveDir, os.W_OK):\n SaveFullname_SVG = os.path.join(SaveDir, 'OrthogonalFunctions.svg')\n SaveFullname_PDF = os.path.join(SaveDir, 'OrthogonalFunctions.pdf')\n plt.savefig(SaveFullname_SVG, transparent=True, bbox_inches='tight')\n plt.savefig(SaveFullname_PDF, transparent=True, bbox_inches='tight')\n print('')\n print('Plot saved to \"%s\".' % SaveFullname_SVG)\n print('Plot saved to \"%s\".' % SaveFullname_PDF)\n else:\n print('Cannot save plot to %s. Directory is not writable.' % SaveDir)\n if matplotlib.get_backend() != 'agg':\n plt.show()\n",
"step-5": "# =======\n# Imports\n# =======\n\nimport os\nimport sympy\nimport numpy\nfrom distutils.spawn import find_executable\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom .Declarations import n,t\n\n# =============\n# Plot Settings\n# =============\n\ndef PlotSettings():\n \"\"\"\n General settings for the plot.\n \"\"\"\n\n # Color palette\n import seaborn as sns\n # sns.set()\n\n # Axes font size\n sns.set(font_scale=1.2)\n\n # LaTeX\n if find_executable('latex'):\n plt.rc('text',usetex=True)\n matplotlib.font_manager._rebuild()\n\n # Style sheet\n sns.set_style(\"white\")\n sns.set_style(\"ticks\")\n\n # Font (Note: this should be AFTER the plt.style.use)\n plt.rc('font', family='serif')\n plt.rcParams['svg.fonttype'] = 'none' # text in svg file will be text not path.\n\n# ==============\n# Plot Functions\n# ==============\n\ndef PlotFunctions(phi_orthonormalized_list,StartFunctionIndex,Interval):\n\n # Run plot settings\n PlotSettings()\n\n # Axis\n t_array = numpy.logspace(-7,numpy.log10(Interval[1]),1000)\n\n # Evaluate functions\n NumFunctions = len(phi_orthonormalized_list)\n\n f = numpy.zeros((NumFunctions,t_array.size),dtype=float)\n for j in range(NumFunctions):\n f_lambdify = sympy.lambdify(t,phi_orthonormalized_list[j],'numpy')\n f[j,:] = f_lambdify(t_array)\n\n # Plot\n fig,ax = plt.subplots(figsize=(7,4.8))\n for j in range(NumFunctions):\n ax.semilogx(t_array,f[j,:],label=r'$i = %d$'%(j+StartFunctionIndex))\n\n ax.legend(ncol=3,loc='lower left',borderpad=0.5,frameon=False)\n ax.set_xlim([t_array[0],t_array[-1]])\n ax.set_ylim([-1,1])\n ax.set_yticks([-1,0,1])\n ax.set_xlabel(r'$t$')\n ax.set_ylabel(r'$\\phi_i^{\\perp}(t)$')\n ax.set_title('Orthogonal functions')\n ax.grid(axis='y')\n\n # Get the root directory of the package (parent directory of this script)\n FileDirectory = os.path.dirname(__file__)\n ParentDirectory = os.path.dirname(FileDirectory)\n\n # Try to save in the doc/images dirctory. Check if exists and writable\n SaveDir = os.path.join(ParentDirectory,'doc','images')\n if (not os.path.isdir(SaveDir)) or (not os.access(SaveDir,os.W_OK)):\n\n # Write in the current working directory\n SaveDir = os.getcwd()\n\n # Save plot in both svg and pdf format\n if os.access(SaveDir,os.W_OK):\n SaveFullname_SVG = os.path.join(SaveDir,'OrthogonalFunctions.svg')\n SaveFullname_PDF = os.path.join(SaveDir,'OrthogonalFunctions.pdf')\n plt.savefig(SaveFullname_SVG,transparent=True,bbox_inches='tight')\n plt.savefig(SaveFullname_PDF,transparent=True,bbox_inches='tight')\n print('')\n print('Plot saved to \"%s\".'%(SaveFullname_SVG))\n print('Plot saved to \"%s\".'%(SaveFullname_PDF))\n else:\n print('Cannot save plot to %s. Directory is not writable.'%SaveDir)\n\n # If no display backend is enabled, do not plot in the interactive mode\n if matplotlib.get_backend() != 'agg':\n plt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Block:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Block:
def __init__(self, index, transactions, previous_hash, nonce=0):
self.index = index
self.transaction = transactions
self.timestamp = time.time()
self.previous_hash = previous_hash
self.nonce = nonce
self.hash = None
<|reserved_special_token_1|>
import time
class Block:
def __init__(self, index, transactions, previous_hash, nonce=0):
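        # Record the block's position, payload, timestamp, and link to the previous block; the hash stays None until computed.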
self.index = index
self.transaction = transactions
self.timestamp = time.time()
self.previous_hash = previous_hash
self.nonce = nonce
self.hash = None
|
flexible
|
{
"blob_id": "43a23958b8c8779e3292f0f523a37b6d712fdbac",
"index": 4448,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Block:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Block:\n\n def __init__(self, index, transactions, previous_hash, nonce=0):\n self.index = index\n self.transaction = transactions\n self.timestamp = time.time()\n self.previous_hash = previous_hash\n self.nonce = nonce\n self.hash = None\n",
"step-4": "import time\n\n\nclass Block:\n\n def __init__(self, index, transactions, previous_hash, nonce=0):\n self.index = index\n self.transaction = transactions\n self.timestamp = time.time()\n self.previous_hash = previous_hash\n self.nonce = nonce\n self.hash = None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import matplotlib.pyplot as plt
loansData = pd.read_csv('loansData.csv')
# Print the first 5 rows of each column to see what needs to be cleaned
print(loansData['Interest.Rate'][0:5])
print(loansData['Loan.Length'][0:5])
print(loansData['FICO.Range'][0:5])
# Clean up the columns
loansData['Interest.Rate'] = loansData['Interest.Rate'].map(
    lambda x: x.rstrip('%'))
loansData['Loan.Length'] = loansData['Loan.Length'].map(
    lambda x: x.rstrip('months'))
# Print again to see if cleaning took place or not
print(loansData['Interest.Rate'][0:5])
print(loansData['Loan.Length'][0:5])
'''
Convert the data in FICO.Range into strings, then
split each range on '-' and take the lowest value.
'''
loansData['FICO.Score'] = loansData['FICO.Range'].astype(str)
print(loansData['FICO.Score'][0:5])
loans_list = loansData['FICO.Score'].tolist()
FICO = []
for array in range(len(loans_list)):
    loan = loans_list[array].split("-")  # Split each range string on '-'
    FICO.append(int(loan[0]))
loansData['FICO.Score'] = FICO
# Plot histogram
plt.figure()
p = loansData['FICO.Score'].hist()
plt.show()
# Create a scatterplot matrix
a = pd.plotting.scatter_matrix(loansData, alpha=0.05, figsize=(10, 10))
plt.show()
a = pd.plotting.scatter_matrix(loansData, alpha=0.05, figsize=(10, 10), diagonal='hist')
plt.show()
|
normal
|
{
"blob_id": "fc17b865815a7a5ec51f477a9fdda54667686eed",
"index": 1672,
"step-1": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n\nloansData = pd.read_csv('loansData.csv')\n\n# Print the first 5 rows of each of the column to see what needs to be cleaned\nprint loansData['Interest.Rate'][0:5]\nprint loansData['Loan.Length'][0:5]\nprint loansData['FICO.Range'][0:5]\n\n\n# Clean up the columns\nloansData['Interest.Rate'] = loansData['Interest.Rate'].map(\n lambda x: x.rstrip('%'))\nloansData['Loan.Length'] = loansData['Loan.Length'].map(\n lambda x: x.rstrip('months'))\n\n# Print again to see if cleaning took place or not\nprint loansData['Interest.Rate'][0:5]\nprint loansData['Loan.Length'][0:5]\n\n\n'''\nconvert the data in FICO Range into string and\nsplit the string and take the lowest value.\n'''\nloansData['FICO.Score'] = loansData['FICO.Range'].astype(str)\nprint loansData['FICO.Score'][0:5]\nloansData['FICO.Score'] = loansData['FICO.Score'].split()\nprint loansData['FICO.Score'][0:5]\n\n\nloans_list = loansData['FICO.Score'].tolist()\n\nFICO = []\nfor array in range(len(loans_list)):\n loan = loans_list[array].split(\"-\") # Split each sub-array on '-'\n FICO.append(int(loan[0]))\n\nloansData['FICO.Score'] = FICO\n\n# Plot histogram\nplt.figure()\np = loansData['FICO.Score'].hist()\nplt.show()\n\n# Create a scatterplot matrix\na = pd.scatter_matrix(loansData, alpha=0.05, figure=(10, 10))\nplt.show()\n\na = pd.scatter_matrix(loansData, alpha=0.05, figure=(10, 10), diagonal='hist')\nplt.show()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
turtle.reset()
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255
), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(
'Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
prev_angle += angle_counter
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi))
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(
probability_of_rest, 3)), font=('Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
def calculateFrequencies(arg=None):
try:
result = int(entry.get())
if result >= 54:
return
entry.delete(0, END)
most_frequent_characters = frequency.getNthMostFrequentCharacters(
result)
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(
most_frequent_characters)
angle_of_rest = probability_of_other_characters * 2 * math.pi
central_angles = frequency.getCentralAngles(most_frequent_characters)
drawPieChart(central_angles, angle_of_rest,
probability_of_other_characters)
except ValueError:
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
turtle.reset()
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255
), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(
'Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
prev_angle += angle_counter
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi))
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(
probability_of_rest, 3)), font=('Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
def calculateFrequencies(arg=None):
try:
result = int(entry.get())
if result >= 54:
return
entry.delete(0, END)
most_frequent_characters = frequency.getNthMostFrequentCharacters(
result)
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(
most_frequent_characters)
angle_of_rest = probability_of_other_characters * 2 * math.pi
central_angles = frequency.getCentralAngles(most_frequent_characters)
drawPieChart(central_angles, angle_of_rest,
probability_of_other_characters)
except ValueError:
return
entry.bind('<Return>', calculateFrequencies)
label_1.grid(row=0)
entry.grid(row=0, column=1)
root.mainloop()
window.exitonclick()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = Tk()
window = turtle.Screen()
label_1 = Label(root, text=
'Enter a number less than 54 to get the Nth most frequent letters in Words.txt: '
)
entry = Entry(root)
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
turtle.reset()
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255
), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(
'Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
prev_angle += angle_counter
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi))
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(
probability_of_rest, 3)), font=('Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
def calculateFrequencies(arg=None):
try:
result = int(entry.get())
if result >= 54:
return
entry.delete(0, END)
most_frequent_characters = frequency.getNthMostFrequentCharacters(
result)
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(
most_frequent_characters)
angle_of_rest = probability_of_other_characters * 2 * math.pi
central_angles = frequency.getCentralAngles(most_frequent_characters)
drawPieChart(central_angles, angle_of_rest,
probability_of_other_characters)
except ValueError:
return
entry.bind('<Return>', calculateFrequencies)
label_1.grid(row=0)
entry.grid(row=0, column=1)
root.mainloop()
window.exitonclick()
<|reserved_special_token_1|>
from tkinter import *
import frequency
import turtle
import math
import random
root = Tk()
window = turtle.Screen()
label_1 = Label(root, text=
'Enter a number less than 54 to get the Nth most frequent letters in Words.txt: '
)
entry = Entry(root)
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
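    # Draw the base circle, then fill one colored, labeled sector per (letter, angle, probability) tuple and a gray sector for the rest.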
turtle.reset()
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255
), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(
'Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
prev_angle += angle_counter
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi))
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(
probability_of_rest, 3)), font=('Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
def calculateFrequencies(arg=None):
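    # Parse N from the entry box (must be below 54), compute the top-N letter data, and redraw the chart.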
try:
result = int(entry.get())
if result >= 54:
return
entry.delete(0, END)
most_frequent_characters = frequency.getNthMostFrequentCharacters(
result)
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(
most_frequent_characters)
angle_of_rest = probability_of_other_characters * 2 * math.pi
central_angles = frequency.getCentralAngles(most_frequent_characters)
drawPieChart(central_angles, angle_of_rest,
probability_of_other_characters)
except ValueError:
return
entry.bind('<Return>', calculateFrequencies)
label_1.grid(row=0)
entry.grid(row=0, column=1)
root.mainloop()
window.exitonclick()
<|reserved_special_token_1|>
# Patrick Vanegas - Final project
from tkinter import *
import frequency
import turtle
import math
import random
# initialize a blank window
root = Tk()
# initialize turtle window
window = turtle.Screen()
# Create widgets to be viewed on the Tkinter window
label_1 = Label(root, text = "Enter a number less than 54 to get the Nth most frequent letters in Words.txt: ")
entry = Entry(root)
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
# reset turtle to redraw the piechart if the user enters a new value for N.
turtle.reset()
# set color mode to accept rgb values
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
# draw base circle and fill it with color
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
# draw arc sectors for each probability in the circle
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
# turn radians to degrees
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x = 0, y = 120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font = ("Arial", 10, "normal"))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x = 0, y = 120)
turtle.end_fill()
prev_angle += angle_counter
        # draw the arc for the remaining probabilities.
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x = 0, y = 120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi) )
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(probability_of_rest, 3)), font = ("Arial", 10, "normal"))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x = 0, y = 120)
turtle.end_fill()
def calculateFrequencies(arg = None):
# get the text value from the entry field
# if the value is not a valid integer, simply return and do nothing.
try:
result = int(entry.get())
        # return if the input is 54 or greater
if (result >= 54):
return
# delete the text in the entry field
entry.delete(0, END)
# calculate the most frequent characters
most_frequent_characters = frequency.getNthMostFrequentCharacters(result)
# calculate the probability of all other letters not included in the top N.
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(most_frequent_characters)
# calculate the central angle of the rest of the letters.
angle_of_rest = probability_of_other_characters * 2 * math.pi
        # calculate central angles of the most frequent characters' probabilities
central_angles = frequency.getCentralAngles(most_frequent_characters)
# draw pie chart
drawPieChart(central_angles, angle_of_rest, probability_of_other_characters)
except ValueError:
return
# When the user presses enter on the entry field, calculate frequencies
entry.bind('<Return>', calculateFrequencies)
# Position widgets on a grid layout
label_1.grid(row=0)
entry.grid(row=0, column=1)
# keep both the turtle and tkinter windows open until user presses the close button on either
root.mainloop()
window.exitonclick()
|
flexible
|
{
"blob_id": "0ac99816248e3306ca6340f7bee8a518877bc3e9",
"index": 1186,
"step-1": "<mask token>\n\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n turtle.reset()\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n angle_counter = 0\n prev_angle = 0\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n angle_counter += angle * (360 / math.pi)\n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255\n ), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(\n 'Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n prev_angle += angle_counter\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi))\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(\n probability_of_rest, 3)), font=('Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n\n\ndef calculateFrequencies(arg=None):\n try:\n result = int(entry.get())\n if result >= 54:\n return\n entry.delete(0, END)\n most_frequent_characters = frequency.getNthMostFrequentCharacters(\n result)\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(\n most_frequent_characters)\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n drawPieChart(central_angles, angle_of_rest,\n probability_of_other_characters)\n except ValueError:\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n turtle.reset()\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n angle_counter = 0\n prev_angle = 0\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n angle_counter += angle * (360 / math.pi)\n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255\n ), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(\n 'Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n prev_angle += angle_counter\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi))\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(\n probability_of_rest, 3)), font=('Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n\n\ndef calculateFrequencies(arg=None):\n try:\n result = int(entry.get())\n if result >= 54:\n return\n entry.delete(0, END)\n most_frequent_characters = frequency.getNthMostFrequentCharacters(\n result)\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(\n most_frequent_characters)\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n drawPieChart(central_angles, angle_of_rest,\n probability_of_other_characters)\n except ValueError:\n return\n\n\nentry.bind('<Return>', calculateFrequencies)\nlabel_1.grid(row=0)\nentry.grid(row=0, column=1)\nroot.mainloop()\nwindow.exitonclick()\n",
"step-3": "<mask token>\nroot = Tk()\nwindow = turtle.Screen()\nlabel_1 = Label(root, text=\n 'Enter a number less than 54 to get the Nth most frequent letters in Words.txt: '\n )\nentry = Entry(root)\n\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n turtle.reset()\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n angle_counter = 0\n prev_angle = 0\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n angle_counter += angle * (360 / math.pi)\n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255\n ), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(\n 'Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n prev_angle += angle_counter\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi))\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(\n probability_of_rest, 3)), font=('Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n\n\ndef calculateFrequencies(arg=None):\n try:\n result = int(entry.get())\n if result >= 54:\n return\n entry.delete(0, END)\n most_frequent_characters = frequency.getNthMostFrequentCharacters(\n result)\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(\n most_frequent_characters)\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n drawPieChart(central_angles, angle_of_rest,\n probability_of_other_characters)\n except ValueError:\n return\n\n\nentry.bind('<Return>', calculateFrequencies)\nlabel_1.grid(row=0)\nentry.grid(row=0, column=1)\nroot.mainloop()\nwindow.exitonclick()\n",
"step-4": "from tkinter import *\nimport frequency\nimport turtle\nimport math\nimport random\nroot = Tk()\nwindow = turtle.Screen()\nlabel_1 = Label(root, text=\n 'Enter a number less than 54 to get the Nth most frequent letters in Words.txt: '\n )\nentry = Entry(root)\n\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n turtle.reset()\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n angle_counter = 0\n prev_angle = 0\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n angle_counter += angle * (360 / math.pi)\n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255\n ), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(\n 'Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n prev_angle += angle_counter\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi))\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(\n probability_of_rest, 3)), font=('Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n\n\ndef calculateFrequencies(arg=None):\n try:\n result = int(entry.get())\n if result >= 54:\n return\n entry.delete(0, END)\n most_frequent_characters = frequency.getNthMostFrequentCharacters(\n result)\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(\n most_frequent_characters)\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n drawPieChart(central_angles, angle_of_rest,\n probability_of_other_characters)\n except ValueError:\n return\n\n\nentry.bind('<Return>', calculateFrequencies)\nlabel_1.grid(row=0)\nentry.grid(row=0, column=1)\nroot.mainloop()\nwindow.exitonclick()\n",
"step-5": "# Patrick Vanegas - Final project\n\nfrom tkinter import *\nimport frequency\nimport turtle\nimport math\nimport random\n\n# intitalize a blank window\nroot = Tk() \n\n# initialize turtle window\nwindow = turtle.Screen() \n\n# Create widgets to be viewed on the Tkinter window\nlabel_1 = Label(root, text = \"Enter a number less than 54 to get the Nth most frequent letters in Words.txt: \")\nentry = Entry(root)\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n # reset turtle to redraw the piechart if the user enters a new value for N.\n turtle.reset()\n\n # set color mode to accept rgb values\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n\n # draw base circle and fill it with color\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n\n angle_counter = 0\n prev_angle = 0\n\n # draw arc sectors for each probability in the circle\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n # turn radians to degrees\n angle_counter += angle * (360 / math.pi) \n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x = 0, y = 120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font = (\"Arial\", 10, \"normal\"))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x = 0, y = 120)\n turtle.end_fill()\n prev_angle += angle_counter\n\n # draw the arc for the remaining probabilites.\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x = 0, y = 120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi) )\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(probability_of_rest, 3)), font = (\"Arial\", 10, \"normal\"))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x = 0, y = 120)\n turtle.end_fill()\n\ndef calculateFrequencies(arg = None):\n # get the text value from the entry field\n # if the value is not a valid integer, simply return and do nothing.\n try:\n result = int(entry.get())\n\n # return if the input is greater than 54\n if (result >= 54):\n return\n \n # delete the text in the entry field\n entry.delete(0, END)\n\n # calculate the most frequent characters\n most_frequent_characters = frequency.getNthMostFrequentCharacters(result)\n\n # calculate the probability of all other letters not included in the top N.\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(most_frequent_characters)\n\n # calculate the central angle of the rest of the letters.\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n\n # calculate central angles of the most frequenct character's probabilities\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n\n # draw pie chart\n drawPieChart(central_angles, angle_of_rest, probability_of_other_characters)\n except ValueError:\n return \n \n# When the user presses enter on the entry field, calculate frequencies\nentry.bind('<Return>', calculateFrequencies)\n\n# Position 
widgets on a grid layout\nlabel_1.grid(row=0)\nentry.grid(row=0, column=1)\n\n# keep both the turtle and tkinter windows open until user presses the close button on either\nroot.mainloop() \nwindow.exitonclick()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'
tax_pickle_for_apns = 'kmes_taxes.p'
tax_history_pickle = '../cusd_1percent_tax_history.p'
distribution_pickle_out = 'kmes_distribution.p'
cabrillo_key = 50200
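# Parse the county's incremental-factors CSV into two lookups:
#   increment_map: TRA (tax rate area) code -> {fund code: fraction of the 1% tax}
#   funding_code_map: fund code -> fund name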
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-','')
this_trn = increment_map.get(this_trn_code,{})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
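# Load the lookup tables and the set of parcels (APNs) to apportion.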
increment_map, funding_code_map = read_incremental_factors()
import pickle as p
tax_data_apns = p.load(open(tax_pickle_for_apns,'rb'))
apns = list(set([d[0] for d in tax_data_apns]))
apns.sort()
tax_distribution = list()
tax_history = p.load(open(tax_history_pickle,'rb'))
tax_history_apns = [d[0] for d in tax_history]
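# Match each parcel against the tax history and apportion its 1% tax
# to the district using the TRA's incremental factor.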
for apn in apns:
    try:
        tax_history_index = tax_history_apns.index(apn)
    except ValueError:  # APN not present in the tax history
        tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
                tax_distribution.append([
                    this_tax_history[0],  # APN
                    this_tax_history[1],  # TRA code
                    this_tax_history[2],
                    fraction,
                    this_tax_history[3],
                    [t * fraction for t in this_tax_history[3]],  # district share per year
                ])
import numpy as np
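# Stack each parcel's yearly district share and total it by year (2007 onward).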
district_data = np.array([x[5] for x in tax_distribution])
print('District Contributions: ')
district_sum = np.sum(district_data, axis=0)
year = 2007
for ds in district_sum:
print(str(year) + ": " + str(ds))
year += 1
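# Persist the per-parcel distribution along with the fund-code legend.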
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,'wb'))
|
normal
|
{
"blob_id": "18dae039f6455f944cbaa97bcb9c36ed29ac9a21",
"index": 867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_incremental_factors():\n import csv\n inc_file = open(incremental_factors_file, 'r')\n reader = csv.reader(inc_file)\n increment_map = dict()\n funding_code_map = dict()\n this_trn_code = ''\n for row in reader:\n if row[0] != '':\n this_trn_code = row[0].replace('-', '')\n this_trn = increment_map.get(this_trn_code, {})\n this_trn[int(row[1])] = float(row[3])\n funding_code_map[int(row[1])] = row[2]\n increment_map[this_trn_code] = this_trn\n return increment_map, funding_code_map\n\n\n<mask token>\napns.sort()\n<mask token>\nfor apn in apns:\n try:\n tax_history_index = tax_history_apns.index(apn)\n except:\n tax_history_index = None\n if tax_history_index is None:\n print('No Matching APN: ' + apn)\n else:\n this_tax_history = tax_history[tax_history_index]\n total_tax = this_tax_history[3]\n tra = this_tax_history[1]\n this_tra = increment_map.get(tra, None)\n if this_tra is None:\n print('TRA is Null for APN: ' + apn)\n else:\n fraction = this_tra.get(cabrillo_key, None)\n if fraction is None:\n print('APN: ' + apn + ' is not in district')\n else:\n tax_distribution += [[this_tax_history[0], this_tax_history\n [1], this_tax_history[2], fraction, this_tax_history[3],\n [(t * fraction) for t in this_tax_history[3]]]]\n<mask token>\nprint('District Contributions: ')\n<mask token>\nfor ds in district_sum:\n print(str(year) + ': ' + str(ds))\n year += 1\np.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,\n 'wb'))\n",
"step-3": "incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'\ntax_pickle_for_apns = 'kmes_taxes.p'\ntax_history_pickle = '../cusd_1percent_tax_history.p'\ndistribution_pickle_out = 'kmes_distribution.p'\ncabrillo_key = 50200\n\n\ndef read_incremental_factors():\n import csv\n inc_file = open(incremental_factors_file, 'r')\n reader = csv.reader(inc_file)\n increment_map = dict()\n funding_code_map = dict()\n this_trn_code = ''\n for row in reader:\n if row[0] != '':\n this_trn_code = row[0].replace('-', '')\n this_trn = increment_map.get(this_trn_code, {})\n this_trn[int(row[1])] = float(row[3])\n funding_code_map[int(row[1])] = row[2]\n increment_map[this_trn_code] = this_trn\n return increment_map, funding_code_map\n\n\nincrement_map, funding_code_map = read_incremental_factors()\n<mask token>\ntax_data_apns = p.load(open(tax_pickle_for_apns, 'rb'))\napns = list(set([d[0] for d in tax_data_apns]))\napns.sort()\ntax_distribution = list()\ntax_history = p.load(open(tax_history_pickle, 'rb'))\ntax_history_apns = [d[0] for d in tax_history]\nfor apn in apns:\n try:\n tax_history_index = tax_history_apns.index(apn)\n except:\n tax_history_index = None\n if tax_history_index is None:\n print('No Matching APN: ' + apn)\n else:\n this_tax_history = tax_history[tax_history_index]\n total_tax = this_tax_history[3]\n tra = this_tax_history[1]\n this_tra = increment_map.get(tra, None)\n if this_tra is None:\n print('TRA is Null for APN: ' + apn)\n else:\n fraction = this_tra.get(cabrillo_key, None)\n if fraction is None:\n print('APN: ' + apn + ' is not in district')\n else:\n tax_distribution += [[this_tax_history[0], this_tax_history\n [1], this_tax_history[2], fraction, this_tax_history[3],\n [(t * fraction) for t in this_tax_history[3]]]]\n<mask token>\ndistrict_data = np.array(np.array([x[5] for x in tax_distribution]))\nprint('District Contributions: ')\ndistrict_sum = np.sum(district_data, axis=0)\nyear = 2007\nfor ds in district_sum:\n print(str(year) + ': ' + str(ds))\n year += 1\np.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,\n 'wb'))\n",
"step-4": "incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'\ntax_pickle_for_apns = 'kmes_taxes.p'\ntax_history_pickle = '../cusd_1percent_tax_history.p'\ndistribution_pickle_out = 'kmes_distribution.p'\ncabrillo_key = 50200\n\n\ndef read_incremental_factors():\n import csv\n inc_file = open(incremental_factors_file, 'r')\n reader = csv.reader(inc_file)\n increment_map = dict()\n funding_code_map = dict()\n this_trn_code = ''\n for row in reader:\n if row[0] != '':\n this_trn_code = row[0].replace('-', '')\n this_trn = increment_map.get(this_trn_code, {})\n this_trn[int(row[1])] = float(row[3])\n funding_code_map[int(row[1])] = row[2]\n increment_map[this_trn_code] = this_trn\n return increment_map, funding_code_map\n\n\nincrement_map, funding_code_map = read_incremental_factors()\nimport pickle as p\ntax_data_apns = p.load(open(tax_pickle_for_apns, 'rb'))\napns = list(set([d[0] for d in tax_data_apns]))\napns.sort()\ntax_distribution = list()\ntax_history = p.load(open(tax_history_pickle, 'rb'))\ntax_history_apns = [d[0] for d in tax_history]\nfor apn in apns:\n try:\n tax_history_index = tax_history_apns.index(apn)\n except:\n tax_history_index = None\n if tax_history_index is None:\n print('No Matching APN: ' + apn)\n else:\n this_tax_history = tax_history[tax_history_index]\n total_tax = this_tax_history[3]\n tra = this_tax_history[1]\n this_tra = increment_map.get(tra, None)\n if this_tra is None:\n print('TRA is Null for APN: ' + apn)\n else:\n fraction = this_tra.get(cabrillo_key, None)\n if fraction is None:\n print('APN: ' + apn + ' is not in district')\n else:\n tax_distribution += [[this_tax_history[0], this_tax_history\n [1], this_tax_history[2], fraction, this_tax_history[3],\n [(t * fraction) for t in this_tax_history[3]]]]\nimport numpy as np\ndistrict_data = np.array(np.array([x[5] for x in tax_distribution]))\nprint('District Contributions: ')\ndistrict_sum = np.sum(district_data, axis=0)\nyear = 2007\nfor ds in district_sum:\n print(str(year) + ': ' + str(ds))\n year += 1\np.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,\n 'wb'))\n",
"step-5": "incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'\ntax_pickle_for_apns = 'kmes_taxes.p'\ntax_history_pickle = '../cusd_1percent_tax_history.p'\ndistribution_pickle_out = 'kmes_distribution.p'\ncabrillo_key = 50200\n\ndef read_incremental_factors():\n import csv\n inc_file = open(incremental_factors_file, 'r')\n reader = csv.reader(inc_file)\n increment_map = dict()\n funding_code_map = dict()\n this_trn_code = ''\n for row in reader:\n if row[0] != '':\n this_trn_code = row[0].replace('-','')\n this_trn = increment_map.get(this_trn_code,{})\n this_trn[int(row[1])] = float(row[3])\n funding_code_map[int(row[1])] = row[2]\n increment_map[this_trn_code] = this_trn\n return increment_map, funding_code_map\n\nincrement_map, funding_code_map = read_incremental_factors()\nimport pickle as p\ntax_data_apns = p.load(open(tax_pickle_for_apns,'rb'))\napns = list(set([d[0] for d in tax_data_apns]))\napns.sort()\ntax_distribution = list()\ntax_history = p.load(open(tax_history_pickle,'rb'))\ntax_history_apns = [d[0] for d in tax_history]\n\nfor apn in apns:\n try:\n tax_history_index = tax_history_apns.index(apn)\n except:\n tax_history_index = None\n if tax_history_index is None:\n print('No Matching APN: ' + apn)\n else:\n this_tax_history = tax_history[tax_history_index]\n total_tax = this_tax_history[3]\n tra = this_tax_history[1]\n this_tra = increment_map.get(tra, None)\n if this_tra is None:\n print('TRA is Null for APN: ' + apn)\n else:\n fraction = this_tra.get(cabrillo_key, None)\n if fraction is None:\n print('APN: ' + apn + ' is not in district')\n else:\n tax_distribution += [[this_tax_history[0], this_tax_history[1], this_tax_history[2], fraction, this_tax_history[3], [t*fraction for t in this_tax_history[3]]]]\n\nimport numpy as np\n\ndistrict_data = np.array(np.array([x[5] for x in tax_distribution]))\n\nprint('District Contributions: ')\n\ndistrict_sum = np.sum(district_data, axis=0)\nyear = 2007\nfor ds in district_sum:\n print(str(year) + \": \" + str(ds))\n year += 1\n\np.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,'wb'))\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for ticker in tickers:
params = urlencode([('market', market), ('em', tickers[ticker]), (
'code', ticker), ('apply', 0), ('df', start_date.day), ('mf',
start_date.month - 1), ('yf', start_date.year), ('from', start_date
), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',
end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +
start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),
('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',
1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])
url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +
'.csv?' + params)
    print('Requesting data from Finam at: ' + url)
txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()
local_file = open(f'{ticker}_{start}_{end}.txt', 'w')
for line in txt:
local_file.write(line.strip().decode('utf-8') + '\n')
local_file.close()
    print(f'Done. Check the file {ticker}_{start}_{end}.txt in the folder containing the script')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
period = 7
start = '01.01.2021'
end = '10.06.2021'
periods = {'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min':
6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}
tickers = {'ABRD': 82460, 'AESL': 181867, 'AFKS': 19715, 'AFLT': 29, 'AGRO':
399716, 'AKRN': 17564, 'ALBK': 82616, 'ALNU': 81882, 'ALRS': 81820,
'AMEZ': 20702, 'APTK': 13855, 'AQUA': 35238, 'ARMD': 19676, 'ARSA':
19915, 'ASSB': 16452, 'AVAN': 82843, 'AVAZ': 39, 'AVAZP': 40, 'BANE':
81757, 'BANEP': 81758, 'BGDE': 175840, 'BISV': 35242, 'BISVP': 35243,
'BLNG': 21078, 'BRZL': 81901, 'BSPB': 20066, 'CBOM': 420694, 'CHEP':
20999, 'CHGZ': 81933, 'CHKZ': 21000, 'CHMF': 16136, 'CHMK': 21001,
'CHZN': 19960, 'CLSB': 16712, 'CLSBP': 16713, 'CNTL': 21002, 'CNTLP':
81575, 'DASB': 16825, 'DGBZ': 17919, 'DIOD': 35363, 'DIXY': 18564,
'DVEC': 19724, 'DZRD': 74744, 'DZRDP': 74745, 'ELTZ': 81934, 'ENRU':
16440, 'EPLN': 451471, 'ERCO': 81935, 'FEES': 20509, 'FESH': 20708,
'FORTP': 82164, 'GAZA': 81997, 'GAZAP': 81998, 'GAZC': 81398, 'GAZP':
16842, 'GAZS': 81399, 'GAZT': 82115, 'GCHE': 20125, 'GMKN': 795, 'GRAZ':
16610, 'GRNT': 449114, 'GTLC': 152876, 'GTPR': 175842, 'GTSS': 436120,
'HALS': 17698, 'HIMC': 81939, 'HIMCP': 81940, 'HYDR': 20266, 'IDJT':
388276, 'IDVP': 409486, 'IGST': 81885, 'IGST03': 81886, 'IGSTP': 81887,
'IRAO': 20516, 'IRGZ': 9, 'IRKT': 15547, 'ISKJ': 17137, 'JNOS': 15722,
'JNOSP': 15723, 'KAZT': 81941, 'KAZTP': 81942, 'KBSB': 19916, 'KBTK':
35285, 'KCHE': 20030, 'KCHEP': 20498, 'KGKC': 83261, 'KGKCP': 152350,
'KLSB': 16329, 'KMAZ': 15544, 'KMEZ': 22525, 'KMTZ': 81903, 'KOGK':
20710, 'KRKN': 81891, 'KRKNP': 81892, 'KRKO': 81905, 'KRKOP': 81906,
'KROT': 510, 'KROTP': 511, 'KRSB': 20912, 'KRSBP': 20913, 'KRSG': 15518,
'KSGR': 75094, 'KTSB': 16284, 'KTSBP': 16285, 'KUBE': 522, 'KUNF':
81943, 'KUZB': 83165, 'KZMS': 17359, 'KZOS': 81856, 'KZOSP': 81857,
'LIFE': 74584, 'LKOH': 8, 'LNTA': 385792, 'LNZL': 21004, 'LNZLP': 22094,
'LPSB': 16276, 'LSNG': 31, 'LSNGP': 542, 'LSRG': 19736, 'LVHK': 152517,
'MAGE': 74562, 'MAGEP': 74563, 'MAGN': 16782, 'MERF': 20947, 'MFGS': 30,
'MFGSP': 51, 'MFON': 152516, 'MGNT': 17086, 'MGNZ': 20892, 'MGTS':
12984, 'MGTSP': 12983, 'MGVM': 81829, 'MISB': 16330, 'MISBP': 16331,
'MNFD': 80390, 'MOBB': 82890, 'MOEX': 152798, 'MORI': 81944, 'MOTZ':
21116, 'MRKC': 20235, 'MRKK': 20412, 'MRKP': 20107, 'MRKS': 20346,
'MRKU': 20402, 'MRKV': 20286, 'MRKY': 20681, 'MRKZ': 20309, 'MRSB':
16359, 'MSNG': 6, 'MSRS': 16917, 'MSST': 152676, 'MSTT': 74549, 'MTLR':
21018, 'MTLRP': 80745, 'MTSS': 15523, 'MUGS': 81945, 'MUGSP': 81946,
'MVID': 19737, 'NAUK': 81992, 'NFAZ': 81287, 'NKHP': 450432, 'NKNC':
20100, 'NKNCP': 20101, 'NKSH': 81947, 'NLMK': 17046, 'NMTP': 19629,
'NNSB': 16615, 'NNSBP': 16616, 'NPOF': 81858, 'NSVZ': 81929, 'NVTK':
17370, 'ODVA': 20737, 'OFCB': 80728, 'OGKB': 18684, 'OMSH': 22891,
'OMZZP': 15844, 'OPIN': 20711, 'OSMP': 21006, 'OTCP': 407627, 'PAZA':
81896, 'PHOR': 81114, 'PHST': 19717, 'PIKK': 18654, 'PLSM': 81241,
'PLZL': 17123, 'PMSB': 16908, 'PMSBP': 16909, 'POLY': 175924, 'PRFN':
83121, 'PRIM': 17850, 'PRIN': 22806, 'PRMB': 80818, 'PRTK': 35247,
'PSBR': 152320, 'QIWI': 181610, 'RASP': 17713, 'RBCM': 74779, 'RDRB':
181755, 'RGSS': 181934, 'RKKE': 20321, 'RLMN': 152677, 'RLMNP': 388313,
'RNAV': 66644, 'RODNP': 66693, 'ROLO': 181316, 'ROSB': 16866, 'ROSN':
17273, 'ROST': 20637, 'RSTI': 20971, 'RSTIP': 20972, 'RTGZ': 152397,
'RTKM': 7, 'RTKMP': 15, 'RTSB': 16783, 'RTSBP': 16784, 'RUAL': 414279,
'RUALR': 74718, 'RUGR': 66893, 'RUSI': 81786, 'RUSP': 20712, 'RZSB':
16455, 'SAGO': 445, 'SAGOP': 70, 'SARE': 11, 'SAREP': 24, 'SBER': 3,
'SBERP': 23, 'SELG': 81360, 'SELGP': 82610, 'SELL': 21166, 'SIBG':
436091, 'SIBN': 2, 'SKYC': 83122, 'SNGS': 4, 'SNGSP': 13, 'STSB': 20087,
'STSBP': 20088, 'SVAV': 16080, 'SYNG': 19651, 'SZPR': 22401, 'TAER':
80593, 'TANL': 81914, 'TANLP': 81915, 'TASB': 16265, 'TASBP': 16266,
'TATN': 825, 'TATNP': 826, 'TGKA': 18382, 'TGKB': 17597, 'TGKBP': 18189,
'TGKD': 18310, 'TGKDP': 18391, 'TGKN': 18176, 'TGKO': 81899, 'TNSE':
420644, 'TORS': 16797, 'TORSP': 16798, 'TRCN': 74561, 'TRMK': 18441,
'TRNFP': 1012, 'TTLK': 18371, 'TUCH': 74746, 'TUZA': 20716, 'UCSS':
175781, 'UKUZ': 20717, 'UNAC': 22843, 'UNKL': 82493, 'UPRO': 18584,
'URFD': 75124, 'URKA': 19623, 'URKZ': 82611, 'USBN': 81953, 'UTAR':
15522, 'UTII': 81040, 'UTSY': 419504, 'UWGN': 414560, 'VDSB': 16352,
'VGSB': 16456, 'VGSBP': 16457, 'VJGZ': 81954, 'VJGZP': 81955, 'VLHZ':
17257, 'VRAO': 20958, 'VRAOP': 20959, 'VRSB': 16546, 'VRSBP': 16547,
'VSMO': 15965, 'VSYD': 83251, 'VSYDP': 83252, 'VTBR': 19043, 'VTGK':
19632, 'VTRS': 82886, 'VZRZ': 17068, 'VZRZP': 17067, 'WTCM': 19095,
'WTCMP': 19096, 'YAKG': 81917, 'YKEN': 81766, 'YKENP': 81769, 'YNDX':
388383, 'YRSB': 16342, 'YRSBP': 16343, 'ZHIV': 181674, 'ZILL': 81918,
'ZMZN': 556, 'ZMZNP': 603, 'ZVEZ': 82001}
FINAM_URL = 'http://export.finam.ru/'
market = 0
start_date = datetime.strptime(start, '%d.%m.%Y').date()
start_date_rev = datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')
end_date = datetime.strptime(end, '%d.%m.%Y').date()
end_date_rev = datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')
for ticker in tickers:
params = urlencode([('market', market), ('em', tickers[ticker]), (
'code', ticker), ('apply', 0), ('df', start_date.day), ('mf',
start_date.month - 1), ('yf', start_date.year), ('from', start_date
), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',
end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +
start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),
('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',
1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])
url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +
'.csv?' + params)
    print('Requesting data from Finam at: ' + url)
txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()
local_file = open(f'{ticker}_{start}_{end}.txt', 'w')
for line in txt:
local_file.write(line.strip().decode('utf-8') + '\n')
local_file.close()
    print(f'Done. Check the file {ticker}_{start}_{end}.txt in the folder containing the script')
<|reserved_special_token_1|>
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from datetime import datetime
period = 7
start = '01.01.2021'
end = '10.06.2021'
periods = {'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min':
6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}
tickers = {'ABRD': 82460, 'AESL': 181867, 'AFKS': 19715, 'AFLT': 29, 'AGRO':
399716, 'AKRN': 17564, 'ALBK': 82616, 'ALNU': 81882, 'ALRS': 81820,
'AMEZ': 20702, 'APTK': 13855, 'AQUA': 35238, 'ARMD': 19676, 'ARSA':
19915, 'ASSB': 16452, 'AVAN': 82843, 'AVAZ': 39, 'AVAZP': 40, 'BANE':
81757, 'BANEP': 81758, 'BGDE': 175840, 'BISV': 35242, 'BISVP': 35243,
'BLNG': 21078, 'BRZL': 81901, 'BSPB': 20066, 'CBOM': 420694, 'CHEP':
20999, 'CHGZ': 81933, 'CHKZ': 21000, 'CHMF': 16136, 'CHMK': 21001,
'CHZN': 19960, 'CLSB': 16712, 'CLSBP': 16713, 'CNTL': 21002, 'CNTLP':
81575, 'DASB': 16825, 'DGBZ': 17919, 'DIOD': 35363, 'DIXY': 18564,
'DVEC': 19724, 'DZRD': 74744, 'DZRDP': 74745, 'ELTZ': 81934, 'ENRU':
16440, 'EPLN': 451471, 'ERCO': 81935, 'FEES': 20509, 'FESH': 20708,
'FORTP': 82164, 'GAZA': 81997, 'GAZAP': 81998, 'GAZC': 81398, 'GAZP':
16842, 'GAZS': 81399, 'GAZT': 82115, 'GCHE': 20125, 'GMKN': 795, 'GRAZ':
16610, 'GRNT': 449114, 'GTLC': 152876, 'GTPR': 175842, 'GTSS': 436120,
'HALS': 17698, 'HIMC': 81939, 'HIMCP': 81940, 'HYDR': 20266, 'IDJT':
388276, 'IDVP': 409486, 'IGST': 81885, 'IGST03': 81886, 'IGSTP': 81887,
'IRAO': 20516, 'IRGZ': 9, 'IRKT': 15547, 'ISKJ': 17137, 'JNOS': 15722,
'JNOSP': 15723, 'KAZT': 81941, 'KAZTP': 81942, 'KBSB': 19916, 'KBTK':
35285, 'KCHE': 20030, 'KCHEP': 20498, 'KGKC': 83261, 'KGKCP': 152350,
'KLSB': 16329, 'KMAZ': 15544, 'KMEZ': 22525, 'KMTZ': 81903, 'KOGK':
20710, 'KRKN': 81891, 'KRKNP': 81892, 'KRKO': 81905, 'KRKOP': 81906,
'KROT': 510, 'KROTP': 511, 'KRSB': 20912, 'KRSBP': 20913, 'KRSG': 15518,
'KSGR': 75094, 'KTSB': 16284, 'KTSBP': 16285, 'KUBE': 522, 'KUNF':
81943, 'KUZB': 83165, 'KZMS': 17359, 'KZOS': 81856, 'KZOSP': 81857,
'LIFE': 74584, 'LKOH': 8, 'LNTA': 385792, 'LNZL': 21004, 'LNZLP': 22094,
'LPSB': 16276, 'LSNG': 31, 'LSNGP': 542, 'LSRG': 19736, 'LVHK': 152517,
'MAGE': 74562, 'MAGEP': 74563, 'MAGN': 16782, 'MERF': 20947, 'MFGS': 30,
'MFGSP': 51, 'MFON': 152516, 'MGNT': 17086, 'MGNZ': 20892, 'MGTS':
12984, 'MGTSP': 12983, 'MGVM': 81829, 'MISB': 16330, 'MISBP': 16331,
'MNFD': 80390, 'MOBB': 82890, 'MOEX': 152798, 'MORI': 81944, 'MOTZ':
21116, 'MRKC': 20235, 'MRKK': 20412, 'MRKP': 20107, 'MRKS': 20346,
'MRKU': 20402, 'MRKV': 20286, 'MRKY': 20681, 'MRKZ': 20309, 'MRSB':
16359, 'MSNG': 6, 'MSRS': 16917, 'MSST': 152676, 'MSTT': 74549, 'MTLR':
21018, 'MTLRP': 80745, 'MTSS': 15523, 'MUGS': 81945, 'MUGSP': 81946,
'MVID': 19737, 'NAUK': 81992, 'NFAZ': 81287, 'NKHP': 450432, 'NKNC':
20100, 'NKNCP': 20101, 'NKSH': 81947, 'NLMK': 17046, 'NMTP': 19629,
'NNSB': 16615, 'NNSBP': 16616, 'NPOF': 81858, 'NSVZ': 81929, 'NVTK':
17370, 'ODVA': 20737, 'OFCB': 80728, 'OGKB': 18684, 'OMSH': 22891,
'OMZZP': 15844, 'OPIN': 20711, 'OSMP': 21006, 'OTCP': 407627, 'PAZA':
81896, 'PHOR': 81114, 'PHST': 19717, 'PIKK': 18654, 'PLSM': 81241,
'PLZL': 17123, 'PMSB': 16908, 'PMSBP': 16909, 'POLY': 175924, 'PRFN':
83121, 'PRIM': 17850, 'PRIN': 22806, 'PRMB': 80818, 'PRTK': 35247,
'PSBR': 152320, 'QIWI': 181610, 'RASP': 17713, 'RBCM': 74779, 'RDRB':
181755, 'RGSS': 181934, 'RKKE': 20321, 'RLMN': 152677, 'RLMNP': 388313,
'RNAV': 66644, 'RODNP': 66693, 'ROLO': 181316, 'ROSB': 16866, 'ROSN':
17273, 'ROST': 20637, 'RSTI': 20971, 'RSTIP': 20972, 'RTGZ': 152397,
'RTKM': 7, 'RTKMP': 15, 'RTSB': 16783, 'RTSBP': 16784, 'RUAL': 414279,
'RUALR': 74718, 'RUGR': 66893, 'RUSI': 81786, 'RUSP': 20712, 'RZSB':
16455, 'SAGO': 445, 'SAGOP': 70, 'SARE': 11, 'SAREP': 24, 'SBER': 3,
'SBERP': 23, 'SELG': 81360, 'SELGP': 82610, 'SELL': 21166, 'SIBG':
436091, 'SIBN': 2, 'SKYC': 83122, 'SNGS': 4, 'SNGSP': 13, 'STSB': 20087,
'STSBP': 20088, 'SVAV': 16080, 'SYNG': 19651, 'SZPR': 22401, 'TAER':
80593, 'TANL': 81914, 'TANLP': 81915, 'TASB': 16265, 'TASBP': 16266,
'TATN': 825, 'TATNP': 826, 'TGKA': 18382, 'TGKB': 17597, 'TGKBP': 18189,
'TGKD': 18310, 'TGKDP': 18391, 'TGKN': 18176, 'TGKO': 81899, 'TNSE':
420644, 'TORS': 16797, 'TORSP': 16798, 'TRCN': 74561, 'TRMK': 18441,
'TRNFP': 1012, 'TTLK': 18371, 'TUCH': 74746, 'TUZA': 20716, 'UCSS':
175781, 'UKUZ': 20717, 'UNAC': 22843, 'UNKL': 82493, 'UPRO': 18584,
'URFD': 75124, 'URKA': 19623, 'URKZ': 82611, 'USBN': 81953, 'UTAR':
15522, 'UTII': 81040, 'UTSY': 419504, 'UWGN': 414560, 'VDSB': 16352,
'VGSB': 16456, 'VGSBP': 16457, 'VJGZ': 81954, 'VJGZP': 81955, 'VLHZ':
17257, 'VRAO': 20958, 'VRAOP': 20959, 'VRSB': 16546, 'VRSBP': 16547,
'VSMO': 15965, 'VSYD': 83251, 'VSYDP': 83252, 'VTBR': 19043, 'VTGK':
19632, 'VTRS': 82886, 'VZRZ': 17068, 'VZRZP': 17067, 'WTCM': 19095,
'WTCMP': 19096, 'YAKG': 81917, 'YKEN': 81766, 'YKENP': 81769, 'YNDX':
388383, 'YRSB': 16342, 'YRSBP': 16343, 'ZHIV': 181674, 'ZILL': 81918,
'ZMZN': 556, 'ZMZNP': 603, 'ZVEZ': 82001}
FINAM_URL = 'http://export.finam.ru/'
market = 0
start_date = datetime.strptime(start, '%d.%m.%Y').date()
start_date_rev = datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')
end_date = datetime.strptime(end, '%d.%m.%Y').date()
end_date_rev = datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')
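# Build an export request per ticker and write the quotes to a per-ticker text file.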
for ticker in tickers:
params = urlencode([('market', market), ('em', tickers[ticker]), (
'code', ticker), ('apply', 0), ('df', start_date.day), ('mf',
start_date.month - 1), ('yf', start_date.year), ('from', start_date
), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',
end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +
start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),
('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',
1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])
url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +
'.csv?' + params)
    print('Requesting data from Finam at: ' + url)
txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()
local_file = open(f'{ticker}_{start}_{end}.txt', 'w')
for line in txt:
local_file.write(line.strip().decode('utf-8') + '\n')
local_file.close()
    print(f'Done. Check the file {ticker}_{start}_{end}.txt in the folder containing the script')
<|reserved_special_token_1|>
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from datetime import datetime
#user-configurable variables
period=7 # set the timeframe; choose from: 'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min': 6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10
start = "01.01.2021" #first date to pull quotes from
end = "10.06.2021" #last date to pull quotes up to
periods={'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min': 6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}
#Finam assigns every stock a numeric code:
tickers={'ABRD':82460,'AESL':181867,'AFKS':19715,'AFLT':29,'AGRO':399716,'AKRN':17564,'ALBK':82616,'ALNU':81882,'ALRS':81820,'AMEZ':20702,'APTK':13855,'AQUA':35238,'ARMD':19676,'ARSA':19915,'ASSB':16452,'AVAN':82843,'AVAZ':39,'AVAZP':40,'BANE':81757,'BANEP':81758,'BGDE':175840,'BISV':35242,'BISVP':35243,'BLNG':21078,'BRZL':81901,'BSPB':20066,'CBOM':420694,'CHEP':20999,'CHGZ':81933,'CHKZ':21000,'CHMF':16136,'CHMK':21001,'CHZN':19960,'CLSB':16712,'CLSBP':16713,'CNTL':21002,'CNTLP':81575,'DASB':16825,'DGBZ':17919,'DIOD':35363,'DIXY':18564,'DVEC':19724,'DZRD':74744,'DZRDP':74745,'ELTZ':81934,'ENRU':16440,'EPLN':451471,'ERCO':81935,'FEES':20509,'FESH':20708,'FORTP':82164,'GAZA':81997,'GAZAP':81998,'GAZC':81398,'GAZP':16842,'GAZS':81399,'GAZT':82115,'GCHE':20125,'GMKN':795,'GRAZ':16610,'GRNT':449114,'GTLC':152876,'GTPR':175842,'GTSS':436120,'HALS':17698,'HIMC':81939,'HIMCP':81940,'HYDR':20266,'IDJT':388276,'IDVP':409486,'IGST':81885,'IGST03':81886,'IGSTP':81887,'IRAO':20516,'IRGZ':9,'IRKT':15547,'ISKJ':17137,'JNOS':15722,'JNOSP':15723,'KAZT':81941,'KAZTP':81942,'KBSB':19916,'KBTK':35285,'KCHE':20030,'KCHEP':20498,'KGKC':83261,'KGKCP':152350,'KLSB':16329,'KMAZ':15544,'KMEZ':22525,'KMTZ':81903,'KOGK':20710,'KRKN':81891,'KRKNP':81892,'KRKO':81905,'KRKOP':81906,'KROT':510,'KROTP':511,'KRSB':20912,'KRSBP':20913,'KRSG':15518,'KSGR':75094,'KTSB':16284,'KTSBP':16285,'KUBE':522,'KUNF':81943,'KUZB':83165,'KZMS':17359,'KZOS':81856,'KZOSP':81857,'LIFE':74584,'LKOH':8,'LNTA':385792,'LNZL':21004,'LNZLP':22094,'LPSB':16276,'LSNG':31,'LSNGP':542,'LSRG':19736,'LVHK':152517,'MAGE':74562,'MAGEP':74563,'MAGN':16782,'MERF':20947,'MFGS':30,'MFGSP':51,'MFON':152516,'MGNT':17086,'MGNZ':20892,'MGTS':12984,'MGTSP':12983,'MGVM':81829,'MISB':16330,'MISBP':16331,'MNFD':80390,'MOBB':82890,'MOEX':152798,'MORI':81944,'MOTZ':21116,'MRKC':20235,'MRKK':20412,'MRKP':20107,'MRKS':20346,'MRKU':20402,'MRKV':20286,'MRKY':20681,'MRKZ':20309,'MRSB':16359,'MSNG':6,'MSRS':16917,'MSST':152676,'MSTT':74549,'MTLR':21018,'MTLRP':80745,'MTSS':15523,'MUGS':81945,'MUGSP':81946,'MVID':19737,'NAUK':81992,'NFAZ':81287,'NKHP':450432,'NKNC':20100,'NKNCP':20101,'NKSH':81947,'NLMK':17046,'NMTP':19629,'NNSB':16615,'NNSBP':16616,'NPOF':81858,'NSVZ':81929,'NVTK':17370,'ODVA':20737,'OFCB':80728,'OGKB':18684,'OMSH':22891,'OMZZP':15844,'OPIN':20711,'OSMP':21006,'OTCP':407627,'PAZA':81896,'PHOR':81114,'PHST':19717,'PIKK':18654,'PLSM':81241,'PLZL':17123,'PMSB':16908,'PMSBP':16909,'POLY':175924,'PRFN':83121,'PRIM':17850,'PRIN':22806,'PRMB':80818,'PRTK':35247,'PSBR':152320,'QIWI':181610,'RASP':17713,'RBCM':74779,'RDRB':181755,'RGSS':181934,'RKKE':20321,'RLMN':152677,'RLMNP':388313,'RNAV':66644,'RODNP':66693,'ROLO':181316,'ROSB':16866,'ROSN':17273,'ROST':20637,'RSTI':20971,'RSTIP':20972,'RTGZ':152397,'RTKM':7,'RTKMP':15,'RTSB':16783,'RTSBP':16784,'RUAL':414279,'RUALR':74718,'RUGR':66893,'RUSI':81786,'RUSP':20712,'RZSB':16455,'SAGO':445,'SAGOP':70,'SARE':11,'SAREP':24,'SBER':3,'SBERP':23,'SELG':81360,'SELGP':82610,'SELL':21166,'SIBG':436091,'SIBN':2,'SKYC':83122,'SNGS':4,'SNGSP':13,'STSB':20087,'STSBP':20088,'SVAV':16080,'SYNG':19651,'SZPR':22401,'TAER':80593,'TANL':81914,'TANLP':81915, 
'TASB':16265,'TASBP':16266,'TATN':825,'TATNP':826,'TGKA':18382,'TGKB':17597,'TGKBP':18189,'TGKD':18310,'TGKDP':18391,'TGKN':18176,'TGKO':81899,'TNSE':420644,'TORS':16797,'TORSP':16798,'TRCN':74561,'TRMK':18441,'TRNFP':1012,'TTLK':18371,'TUCH':74746,'TUZA':20716,'UCSS':175781,'UKUZ':20717,'UNAC':22843,'UNKL':82493,'UPRO':18584,'URFD':75124,'URKA':19623,'URKZ':82611,'USBN':81953,'UTAR':15522,'UTII':81040,'UTSY':419504,'UWGN':414560,'VDSB':16352,'VGSB':16456,'VGSBP':16457,'VJGZ':81954,'VJGZP':81955,'VLHZ':17257,'VRAO':20958,'VRAOP':20959,'VRSB':16546,'VRSBP':16547,'VSMO':15965,'VSYD':83251,'VSYDP':83252,'VTBR':19043,'VTGK':19632,'VTRS':82886,'VZRZ':17068,'VZRZP':17067,'WTCM':19095,'WTCMP':19096,'YAKG':81917,'YKEN':81766,'YKENP':81769,'YNDX':388383,'YRSB':16342,'YRSBP':16343,'ZHIV':181674,'ZILL':81918,'ZMZN':556,'ZMZNP':603,'ZVEZ':82001}
FINAM_URL = "http://export.finam.ru/"# сервер, на который стучимся
market = 0 #можно не задавать. Это рынок, на котором торгуется бумага. Для акций работает с любой цифрой. Другие рынки не проверял.
#Делаем преобразования дат:
start_date = datetime.strptime(start, "%d.%m.%Y").date()
start_date_rev=datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')
end_date = datetime.strptime(end, "%d.%m.%Y").date()
end_date_rev=datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')
for ticker in tickers:
params = urlencode([
        ('market', market),  # market the security trades on
        ('em', tickers[ticker]),  # numeric code Finam assigned to this security
        ('code', ticker),  # ticker of our stock
        ('apply', 0),  # couldn't find out what this means
        ('df', start_date.day),  # start date, day of month (1-31)
        ('mf', start_date.month - 1),  # start date, month number (0-11)
        ('yf', start_date.year),  # start date, year
        ('from', start_date),  # start date in full
        ('dt', end_date.day),  # end date, day of month
        ('mt', end_date.month - 1),  # end date, month number
        ('yt', end_date.year),  # end date, year
        ('to', end_date),  # end date in full
        ('p', period),  # timeframe
        ('f', ticker + "_" + start_date_rev + "_" + end_date_rev),  # name of the generated file
        ('e', ".csv"),  # extension of the generated file
        ('cn', ticker),  # the stock ticker once more
        ('dtf', 1),  # date format; one of 5 options, see https://www.finam.ru/profile/moex-akcii/sberbank/export/
        ('tmf', 1),  # time format; one of 4 options
        ('MSOR', 0),  # candle timestamp (0 - open; 1 - close)
        ('mstime', "on"),  # Moscow time
        ('mstimever', 1),  # timezone correction
        ('sep', 1),  # field separator (1 - comma, 2 - period, 3 - semicolon, 4 - tab, 5 - space)
        ('sep2', 1),  # digit group separator
        ('datf', 1),  # file record format; one of 6 options
        ('at', 1)])  # whether to include column headers
    url = FINAM_URL + ticker + "_" + start_date_rev + "_" + end_date_rev + ".csv?" + params  # the URL is assembled!
    print("Requesting data from Finam at: " + url)
    ##!txt=urlopen(url).readlines()  # old variant without a User-Agent header
    txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()  # the big array of data returned by Finam
    local_file = open(f'{ticker}_{start}_{end}.txt', "w")  # the file the quotes are written to
    for line in txt:  # write the candles line by line
        local_file.write(line.strip().decode("utf-8") + '\n')
    local_file.close()
    print(f"Done. Check the file {ticker}_{start}_{end}.txt in the folder containing the script")
|
flexible
|
{
"blob_id": "9d22a90835f5cf293808ab359244fe1bde81f3e1",
"index": 2171,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor ticker in tickers:\n params = urlencode([('market', market), ('em', tickers[ticker]), (\n 'code', ticker), ('apply', 0), ('df', start_date.day), ('mf', \n start_date.month - 1), ('yf', start_date.year), ('from', start_date\n ), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',\n end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +\n start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),\n ('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',\n 1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])\n url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +\n '.csv?' + params)\n print('Стучимся на Финам по ссылке: ' + url)\n txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()\n local_file = open(f'{ticker}_{start}_{end}.txt', 'w')\n for line in txt:\n local_file.write(line.strip().decode('utf-8') + '\\n')\n local_file.close()\n print('Готово. Проверьте файл quotes.txt в папке где лежит скрипт')\n",
"step-3": "<mask token>\nperiod = 7\nstart = '01.01.2021'\nend = '10.06.2021'\nperiods = {'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min':\n 6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}\ntickers = {'ABRD': 82460, 'AESL': 181867, 'AFKS': 19715, 'AFLT': 29, 'AGRO':\n 399716, 'AKRN': 17564, 'ALBK': 82616, 'ALNU': 81882, 'ALRS': 81820,\n 'AMEZ': 20702, 'APTK': 13855, 'AQUA': 35238, 'ARMD': 19676, 'ARSA': \n 19915, 'ASSB': 16452, 'AVAN': 82843, 'AVAZ': 39, 'AVAZP': 40, 'BANE': \n 81757, 'BANEP': 81758, 'BGDE': 175840, 'BISV': 35242, 'BISVP': 35243,\n 'BLNG': 21078, 'BRZL': 81901, 'BSPB': 20066, 'CBOM': 420694, 'CHEP': \n 20999, 'CHGZ': 81933, 'CHKZ': 21000, 'CHMF': 16136, 'CHMK': 21001,\n 'CHZN': 19960, 'CLSB': 16712, 'CLSBP': 16713, 'CNTL': 21002, 'CNTLP': \n 81575, 'DASB': 16825, 'DGBZ': 17919, 'DIOD': 35363, 'DIXY': 18564,\n 'DVEC': 19724, 'DZRD': 74744, 'DZRDP': 74745, 'ELTZ': 81934, 'ENRU': \n 16440, 'EPLN': 451471, 'ERCO': 81935, 'FEES': 20509, 'FESH': 20708,\n 'FORTP': 82164, 'GAZA': 81997, 'GAZAP': 81998, 'GAZC': 81398, 'GAZP': \n 16842, 'GAZS': 81399, 'GAZT': 82115, 'GCHE': 20125, 'GMKN': 795, 'GRAZ':\n 16610, 'GRNT': 449114, 'GTLC': 152876, 'GTPR': 175842, 'GTSS': 436120,\n 'HALS': 17698, 'HIMC': 81939, 'HIMCP': 81940, 'HYDR': 20266, 'IDJT': \n 388276, 'IDVP': 409486, 'IGST': 81885, 'IGST03': 81886, 'IGSTP': 81887,\n 'IRAO': 20516, 'IRGZ': 9, 'IRKT': 15547, 'ISKJ': 17137, 'JNOS': 15722,\n 'JNOSP': 15723, 'KAZT': 81941, 'KAZTP': 81942, 'KBSB': 19916, 'KBTK': \n 35285, 'KCHE': 20030, 'KCHEP': 20498, 'KGKC': 83261, 'KGKCP': 152350,\n 'KLSB': 16329, 'KMAZ': 15544, 'KMEZ': 22525, 'KMTZ': 81903, 'KOGK': \n 20710, 'KRKN': 81891, 'KRKNP': 81892, 'KRKO': 81905, 'KRKOP': 81906,\n 'KROT': 510, 'KROTP': 511, 'KRSB': 20912, 'KRSBP': 20913, 'KRSG': 15518,\n 'KSGR': 75094, 'KTSB': 16284, 'KTSBP': 16285, 'KUBE': 522, 'KUNF': \n 81943, 'KUZB': 83165, 'KZMS': 17359, 'KZOS': 81856, 'KZOSP': 81857,\n 'LIFE': 74584, 'LKOH': 8, 'LNTA': 385792, 'LNZL': 21004, 'LNZLP': 22094,\n 'LPSB': 16276, 'LSNG': 31, 'LSNGP': 542, 'LSRG': 19736, 'LVHK': 152517,\n 'MAGE': 74562, 'MAGEP': 74563, 'MAGN': 16782, 'MERF': 20947, 'MFGS': 30,\n 'MFGSP': 51, 'MFON': 152516, 'MGNT': 17086, 'MGNZ': 20892, 'MGTS': \n 12984, 'MGTSP': 12983, 'MGVM': 81829, 'MISB': 16330, 'MISBP': 16331,\n 'MNFD': 80390, 'MOBB': 82890, 'MOEX': 152798, 'MORI': 81944, 'MOTZ': \n 21116, 'MRKC': 20235, 'MRKK': 20412, 'MRKP': 20107, 'MRKS': 20346,\n 'MRKU': 20402, 'MRKV': 20286, 'MRKY': 20681, 'MRKZ': 20309, 'MRSB': \n 16359, 'MSNG': 6, 'MSRS': 16917, 'MSST': 152676, 'MSTT': 74549, 'MTLR':\n 21018, 'MTLRP': 80745, 'MTSS': 15523, 'MUGS': 81945, 'MUGSP': 81946,\n 'MVID': 19737, 'NAUK': 81992, 'NFAZ': 81287, 'NKHP': 450432, 'NKNC': \n 20100, 'NKNCP': 20101, 'NKSH': 81947, 'NLMK': 17046, 'NMTP': 19629,\n 'NNSB': 16615, 'NNSBP': 16616, 'NPOF': 81858, 'NSVZ': 81929, 'NVTK': \n 17370, 'ODVA': 20737, 'OFCB': 80728, 'OGKB': 18684, 'OMSH': 22891,\n 'OMZZP': 15844, 'OPIN': 20711, 'OSMP': 21006, 'OTCP': 407627, 'PAZA': \n 81896, 'PHOR': 81114, 'PHST': 19717, 'PIKK': 18654, 'PLSM': 81241,\n 'PLZL': 17123, 'PMSB': 16908, 'PMSBP': 16909, 'POLY': 175924, 'PRFN': \n 83121, 'PRIM': 17850, 'PRIN': 22806, 'PRMB': 80818, 'PRTK': 35247,\n 'PSBR': 152320, 'QIWI': 181610, 'RASP': 17713, 'RBCM': 74779, 'RDRB': \n 181755, 'RGSS': 181934, 'RKKE': 20321, 'RLMN': 152677, 'RLMNP': 388313,\n 'RNAV': 66644, 'RODNP': 66693, 'ROLO': 181316, 'ROSB': 16866, 'ROSN': \n 17273, 'ROST': 20637, 'RSTI': 20971, 'RSTIP': 20972, 'RTGZ': 152397,\n 'RTKM': 7, 'RTKMP': 15, 'RTSB': 
16783, 'RTSBP': 16784, 'RUAL': 414279,\n 'RUALR': 74718, 'RUGR': 66893, 'RUSI': 81786, 'RUSP': 20712, 'RZSB': \n 16455, 'SAGO': 445, 'SAGOP': 70, 'SARE': 11, 'SAREP': 24, 'SBER': 3,\n 'SBERP': 23, 'SELG': 81360, 'SELGP': 82610, 'SELL': 21166, 'SIBG': \n 436091, 'SIBN': 2, 'SKYC': 83122, 'SNGS': 4, 'SNGSP': 13, 'STSB': 20087,\n 'STSBP': 20088, 'SVAV': 16080, 'SYNG': 19651, 'SZPR': 22401, 'TAER': \n 80593, 'TANL': 81914, 'TANLP': 81915, 'TASB': 16265, 'TASBP': 16266,\n 'TATN': 825, 'TATNP': 826, 'TGKA': 18382, 'TGKB': 17597, 'TGKBP': 18189,\n 'TGKD': 18310, 'TGKDP': 18391, 'TGKN': 18176, 'TGKO': 81899, 'TNSE': \n 420644, 'TORS': 16797, 'TORSP': 16798, 'TRCN': 74561, 'TRMK': 18441,\n 'TRNFP': 1012, 'TTLK': 18371, 'TUCH': 74746, 'TUZA': 20716, 'UCSS': \n 175781, 'UKUZ': 20717, 'UNAC': 22843, 'UNKL': 82493, 'UPRO': 18584,\n 'URFD': 75124, 'URKA': 19623, 'URKZ': 82611, 'USBN': 81953, 'UTAR': \n 15522, 'UTII': 81040, 'UTSY': 419504, 'UWGN': 414560, 'VDSB': 16352,\n 'VGSB': 16456, 'VGSBP': 16457, 'VJGZ': 81954, 'VJGZP': 81955, 'VLHZ': \n 17257, 'VRAO': 20958, 'VRAOP': 20959, 'VRSB': 16546, 'VRSBP': 16547,\n 'VSMO': 15965, 'VSYD': 83251, 'VSYDP': 83252, 'VTBR': 19043, 'VTGK': \n 19632, 'VTRS': 82886, 'VZRZ': 17068, 'VZRZP': 17067, 'WTCM': 19095,\n 'WTCMP': 19096, 'YAKG': 81917, 'YKEN': 81766, 'YKENP': 81769, 'YNDX': \n 388383, 'YRSB': 16342, 'YRSBP': 16343, 'ZHIV': 181674, 'ZILL': 81918,\n 'ZMZN': 556, 'ZMZNP': 603, 'ZVEZ': 82001}\nFINAM_URL = 'http://export.finam.ru/'\nmarket = 0\nstart_date = datetime.strptime(start, '%d.%m.%Y').date()\nstart_date_rev = datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')\nend_date = datetime.strptime(end, '%d.%m.%Y').date()\nend_date_rev = datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')\nfor ticker in tickers:\n params = urlencode([('market', market), ('em', tickers[ticker]), (\n 'code', ticker), ('apply', 0), ('df', start_date.day), ('mf', \n start_date.month - 1), ('yf', start_date.year), ('from', start_date\n ), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',\n end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +\n start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),\n ('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',\n 1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])\n url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +\n '.csv?' + params)\n print('Стучимся на Финам по ссылке: ' + url)\n txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()\n local_file = open(f'{ticker}_{start}_{end}.txt', 'w')\n for line in txt:\n local_file.write(line.strip().decode('utf-8') + '\\n')\n local_file.close()\n print('Готово. Проверьте файл quotes.txt в папке где лежит скрипт')\n",
"step-4": "from urllib.parse import urlencode\nfrom urllib.request import urlopen, Request\nfrom datetime import datetime\nperiod = 7\nstart = '01.01.2021'\nend = '10.06.2021'\nperiods = {'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min':\n 6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}\ntickers = {'ABRD': 82460, 'AESL': 181867, 'AFKS': 19715, 'AFLT': 29, 'AGRO':\n 399716, 'AKRN': 17564, 'ALBK': 82616, 'ALNU': 81882, 'ALRS': 81820,\n 'AMEZ': 20702, 'APTK': 13855, 'AQUA': 35238, 'ARMD': 19676, 'ARSA': \n 19915, 'ASSB': 16452, 'AVAN': 82843, 'AVAZ': 39, 'AVAZP': 40, 'BANE': \n 81757, 'BANEP': 81758, 'BGDE': 175840, 'BISV': 35242, 'BISVP': 35243,\n 'BLNG': 21078, 'BRZL': 81901, 'BSPB': 20066, 'CBOM': 420694, 'CHEP': \n 20999, 'CHGZ': 81933, 'CHKZ': 21000, 'CHMF': 16136, 'CHMK': 21001,\n 'CHZN': 19960, 'CLSB': 16712, 'CLSBP': 16713, 'CNTL': 21002, 'CNTLP': \n 81575, 'DASB': 16825, 'DGBZ': 17919, 'DIOD': 35363, 'DIXY': 18564,\n 'DVEC': 19724, 'DZRD': 74744, 'DZRDP': 74745, 'ELTZ': 81934, 'ENRU': \n 16440, 'EPLN': 451471, 'ERCO': 81935, 'FEES': 20509, 'FESH': 20708,\n 'FORTP': 82164, 'GAZA': 81997, 'GAZAP': 81998, 'GAZC': 81398, 'GAZP': \n 16842, 'GAZS': 81399, 'GAZT': 82115, 'GCHE': 20125, 'GMKN': 795, 'GRAZ':\n 16610, 'GRNT': 449114, 'GTLC': 152876, 'GTPR': 175842, 'GTSS': 436120,\n 'HALS': 17698, 'HIMC': 81939, 'HIMCP': 81940, 'HYDR': 20266, 'IDJT': \n 388276, 'IDVP': 409486, 'IGST': 81885, 'IGST03': 81886, 'IGSTP': 81887,\n 'IRAO': 20516, 'IRGZ': 9, 'IRKT': 15547, 'ISKJ': 17137, 'JNOS': 15722,\n 'JNOSP': 15723, 'KAZT': 81941, 'KAZTP': 81942, 'KBSB': 19916, 'KBTK': \n 35285, 'KCHE': 20030, 'KCHEP': 20498, 'KGKC': 83261, 'KGKCP': 152350,\n 'KLSB': 16329, 'KMAZ': 15544, 'KMEZ': 22525, 'KMTZ': 81903, 'KOGK': \n 20710, 'KRKN': 81891, 'KRKNP': 81892, 'KRKO': 81905, 'KRKOP': 81906,\n 'KROT': 510, 'KROTP': 511, 'KRSB': 20912, 'KRSBP': 20913, 'KRSG': 15518,\n 'KSGR': 75094, 'KTSB': 16284, 'KTSBP': 16285, 'KUBE': 522, 'KUNF': \n 81943, 'KUZB': 83165, 'KZMS': 17359, 'KZOS': 81856, 'KZOSP': 81857,\n 'LIFE': 74584, 'LKOH': 8, 'LNTA': 385792, 'LNZL': 21004, 'LNZLP': 22094,\n 'LPSB': 16276, 'LSNG': 31, 'LSNGP': 542, 'LSRG': 19736, 'LVHK': 152517,\n 'MAGE': 74562, 'MAGEP': 74563, 'MAGN': 16782, 'MERF': 20947, 'MFGS': 30,\n 'MFGSP': 51, 'MFON': 152516, 'MGNT': 17086, 'MGNZ': 20892, 'MGTS': \n 12984, 'MGTSP': 12983, 'MGVM': 81829, 'MISB': 16330, 'MISBP': 16331,\n 'MNFD': 80390, 'MOBB': 82890, 'MOEX': 152798, 'MORI': 81944, 'MOTZ': \n 21116, 'MRKC': 20235, 'MRKK': 20412, 'MRKP': 20107, 'MRKS': 20346,\n 'MRKU': 20402, 'MRKV': 20286, 'MRKY': 20681, 'MRKZ': 20309, 'MRSB': \n 16359, 'MSNG': 6, 'MSRS': 16917, 'MSST': 152676, 'MSTT': 74549, 'MTLR':\n 21018, 'MTLRP': 80745, 'MTSS': 15523, 'MUGS': 81945, 'MUGSP': 81946,\n 'MVID': 19737, 'NAUK': 81992, 'NFAZ': 81287, 'NKHP': 450432, 'NKNC': \n 20100, 'NKNCP': 20101, 'NKSH': 81947, 'NLMK': 17046, 'NMTP': 19629,\n 'NNSB': 16615, 'NNSBP': 16616, 'NPOF': 81858, 'NSVZ': 81929, 'NVTK': \n 17370, 'ODVA': 20737, 'OFCB': 80728, 'OGKB': 18684, 'OMSH': 22891,\n 'OMZZP': 15844, 'OPIN': 20711, 'OSMP': 21006, 'OTCP': 407627, 'PAZA': \n 81896, 'PHOR': 81114, 'PHST': 19717, 'PIKK': 18654, 'PLSM': 81241,\n 'PLZL': 17123, 'PMSB': 16908, 'PMSBP': 16909, 'POLY': 175924, 'PRFN': \n 83121, 'PRIM': 17850, 'PRIN': 22806, 'PRMB': 80818, 'PRTK': 35247,\n 'PSBR': 152320, 'QIWI': 181610, 'RASP': 17713, 'RBCM': 74779, 'RDRB': \n 181755, 'RGSS': 181934, 'RKKE': 20321, 'RLMN': 152677, 'RLMNP': 388313,\n 'RNAV': 66644, 'RODNP': 66693, 'ROLO': 181316, 'ROSB': 16866, 'ROSN': \n 17273, 
'ROST': 20637, 'RSTI': 20971, 'RSTIP': 20972, 'RTGZ': 152397,\n 'RTKM': 7, 'RTKMP': 15, 'RTSB': 16783, 'RTSBP': 16784, 'RUAL': 414279,\n 'RUALR': 74718, 'RUGR': 66893, 'RUSI': 81786, 'RUSP': 20712, 'RZSB': \n 16455, 'SAGO': 445, 'SAGOP': 70, 'SARE': 11, 'SAREP': 24, 'SBER': 3,\n 'SBERP': 23, 'SELG': 81360, 'SELGP': 82610, 'SELL': 21166, 'SIBG': \n 436091, 'SIBN': 2, 'SKYC': 83122, 'SNGS': 4, 'SNGSP': 13, 'STSB': 20087,\n 'STSBP': 20088, 'SVAV': 16080, 'SYNG': 19651, 'SZPR': 22401, 'TAER': \n 80593, 'TANL': 81914, 'TANLP': 81915, 'TASB': 16265, 'TASBP': 16266,\n 'TATN': 825, 'TATNP': 826, 'TGKA': 18382, 'TGKB': 17597, 'TGKBP': 18189,\n 'TGKD': 18310, 'TGKDP': 18391, 'TGKN': 18176, 'TGKO': 81899, 'TNSE': \n 420644, 'TORS': 16797, 'TORSP': 16798, 'TRCN': 74561, 'TRMK': 18441,\n 'TRNFP': 1012, 'TTLK': 18371, 'TUCH': 74746, 'TUZA': 20716, 'UCSS': \n 175781, 'UKUZ': 20717, 'UNAC': 22843, 'UNKL': 82493, 'UPRO': 18584,\n 'URFD': 75124, 'URKA': 19623, 'URKZ': 82611, 'USBN': 81953, 'UTAR': \n 15522, 'UTII': 81040, 'UTSY': 419504, 'UWGN': 414560, 'VDSB': 16352,\n 'VGSB': 16456, 'VGSBP': 16457, 'VJGZ': 81954, 'VJGZP': 81955, 'VLHZ': \n 17257, 'VRAO': 20958, 'VRAOP': 20959, 'VRSB': 16546, 'VRSBP': 16547,\n 'VSMO': 15965, 'VSYD': 83251, 'VSYDP': 83252, 'VTBR': 19043, 'VTGK': \n 19632, 'VTRS': 82886, 'VZRZ': 17068, 'VZRZP': 17067, 'WTCM': 19095,\n 'WTCMP': 19096, 'YAKG': 81917, 'YKEN': 81766, 'YKENP': 81769, 'YNDX': \n 388383, 'YRSB': 16342, 'YRSBP': 16343, 'ZHIV': 181674, 'ZILL': 81918,\n 'ZMZN': 556, 'ZMZNP': 603, 'ZVEZ': 82001}\nFINAM_URL = 'http://export.finam.ru/'\nmarket = 0\nstart_date = datetime.strptime(start, '%d.%m.%Y').date()\nstart_date_rev = datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')\nend_date = datetime.strptime(end, '%d.%m.%Y').date()\nend_date_rev = datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')\nfor ticker in tickers:\n params = urlencode([('market', market), ('em', tickers[ticker]), (\n 'code', ticker), ('apply', 0), ('df', start_date.day), ('mf', \n start_date.month - 1), ('yf', start_date.year), ('from', start_date\n ), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',\n end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +\n start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),\n ('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',\n 1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])\n url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +\n '.csv?' + params)\n print('Стучимся на Финам по ссылке: ' + url)\n txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()\n local_file = open(f'{ticker}_{start}_{end}.txt', 'w')\n for line in txt:\n local_file.write(line.strip().decode('utf-8') + '\\n')\n local_file.close()\n print('Готово. Проверьте файл quotes.txt в папке где лежит скрипт')\n",
"step-5": "from urllib.parse import urlencode\nfrom urllib.request import urlopen, Request\nfrom datetime import datetime\n\n#пользовательские переменные\nperiod=7 # задаём период. Выбор из: 'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min': 6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10\nstart = \"01.01.2021\" #с какой даты начинать тянуть котировки \nend = \"10.06.2021\" #финальная дата, по которую тянуть котировки\n\nperiods={'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min': 6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}\n#каждой акции Финам присвоил цифровой код:\ntickers={'ABRD':82460,'AESL':181867,'AFKS':19715,'AFLT':29,'AGRO':399716,'AKRN':17564,'ALBK':82616,'ALNU':81882,'ALRS':81820,'AMEZ':20702,'APTK':13855,'AQUA':35238,'ARMD':19676,'ARSA':19915,'ASSB':16452,'AVAN':82843,'AVAZ':39,'AVAZP':40,'BANE':81757,'BANEP':81758,'BGDE':175840,'BISV':35242,'BISVP':35243,'BLNG':21078,'BRZL':81901,'BSPB':20066,'CBOM':420694,'CHEP':20999,'CHGZ':81933,'CHKZ':21000,'CHMF':16136,'CHMK':21001,'CHZN':19960,'CLSB':16712,'CLSBP':16713,'CNTL':21002,'CNTLP':81575,'DASB':16825,'DGBZ':17919,'DIOD':35363,'DIXY':18564,'DVEC':19724,'DZRD':74744,'DZRDP':74745,'ELTZ':81934,'ENRU':16440,'EPLN':451471,'ERCO':81935,'FEES':20509,'FESH':20708,'FORTP':82164,'GAZA':81997,'GAZAP':81998,'GAZC':81398,'GAZP':16842,'GAZS':81399,'GAZT':82115,'GCHE':20125,'GMKN':795,'GRAZ':16610,'GRNT':449114,'GTLC':152876,'GTPR':175842,'GTSS':436120,'HALS':17698,'HIMC':81939,'HIMCP':81940,'HYDR':20266,'IDJT':388276,'IDVP':409486,'IGST':81885,'IGST03':81886,'IGSTP':81887,'IRAO':20516,'IRGZ':9,'IRKT':15547,'ISKJ':17137,'JNOS':15722,'JNOSP':15723,'KAZT':81941,'KAZTP':81942,'KBSB':19916,'KBTK':35285,'KCHE':20030,'KCHEP':20498,'KGKC':83261,'KGKCP':152350,'KLSB':16329,'KMAZ':15544,'KMEZ':22525,'KMTZ':81903,'KOGK':20710,'KRKN':81891,'KRKNP':81892,'KRKO':81905,'KRKOP':81906,'KROT':510,'KROTP':511,'KRSB':20912,'KRSBP':20913,'KRSG':15518,'KSGR':75094,'KTSB':16284,'KTSBP':16285,'KUBE':522,'KUNF':81943,'KUZB':83165,'KZMS':17359,'KZOS':81856,'KZOSP':81857,'LIFE':74584,'LKOH':8,'LNTA':385792,'LNZL':21004,'LNZLP':22094,'LPSB':16276,'LSNG':31,'LSNGP':542,'LSRG':19736,'LVHK':152517,'MAGE':74562,'MAGEP':74563,'MAGN':16782,'MERF':20947,'MFGS':30,'MFGSP':51,'MFON':152516,'MGNT':17086,'MGNZ':20892,'MGTS':12984,'MGTSP':12983,'MGVM':81829,'MISB':16330,'MISBP':16331,'MNFD':80390,'MOBB':82890,'MOEX':152798,'MORI':81944,'MOTZ':21116,'MRKC':20235,'MRKK':20412,'MRKP':20107,'MRKS':20346,'MRKU':20402,'MRKV':20286,'MRKY':20681,'MRKZ':20309,'MRSB':16359,'MSNG':6,'MSRS':16917,'MSST':152676,'MSTT':74549,'MTLR':21018,'MTLRP':80745,'MTSS':15523,'MUGS':81945,'MUGSP':81946,'MVID':19737,'NAUK':81992,'NFAZ':81287,'NKHP':450432,'NKNC':20100,'NKNCP':20101,'NKSH':81947,'NLMK':17046,'NMTP':19629,'NNSB':16615,'NNSBP':16616,'NPOF':81858,'NSVZ':81929,'NVTK':17370,'ODVA':20737,'OFCB':80728,'OGKB':18684,'OMSH':22891,'OMZZP':15844,'OPIN':20711,'OSMP':21006,'OTCP':407627,'PAZA':81896,'PHOR':81114,'PHST':19717,'PIKK':18654,'PLSM':81241,'PLZL':17123,'PMSB':16908,'PMSBP':16909,'POLY':175924,'PRFN':83121,'PRIM':17850,'PRIN':22806,'PRMB':80818,'PRTK':35247,'PSBR':152320,'QIWI':181610,'RASP':17713,'RBCM':74779,'RDRB':181755,'RGSS':181934,'RKKE':20321,'RLMN':152677,'RLMNP':388313,'RNAV':66644,'RODNP':66693,'ROLO':181316,'ROSB':16866,'ROSN':17273,'ROST':20637,'RSTI':20971,'RSTIP':20972,'RTGZ':152397,'RTKM':7,'RTKMP':15,'RTSB':16783,'RTSBP':16784,'RUAL':414279,'RUALR':74718,'RUGR':66893,'RUSI':81786,'RUSP':20712,'RZSB':16455,'SAGO':445,'SAGOP':70,'SARE':11,'SAREP':24
,'SBER':3,'SBERP':23,'SELG':81360,'SELGP':82610,'SELL':21166,'SIBG':436091,'SIBN':2,'SKYC':83122,'SNGS':4,'SNGSP':13,'STSB':20087,'STSBP':20088,'SVAV':16080,'SYNG':19651,'SZPR':22401,'TAER':80593,'TANL':81914,'TANLP':81915, 'TASB':16265,'TASBP':16266,'TATN':825,'TATNP':826,'TGKA':18382,'TGKB':17597,'TGKBP':18189,'TGKD':18310,'TGKDP':18391,'TGKN':18176,'TGKO':81899,'TNSE':420644,'TORS':16797,'TORSP':16798,'TRCN':74561,'TRMK':18441,'TRNFP':1012,'TTLK':18371,'TUCH':74746,'TUZA':20716,'UCSS':175781,'UKUZ':20717,'UNAC':22843,'UNKL':82493,'UPRO':18584,'URFD':75124,'URKA':19623,'URKZ':82611,'USBN':81953,'UTAR':15522,'UTII':81040,'UTSY':419504,'UWGN':414560,'VDSB':16352,'VGSB':16456,'VGSBP':16457,'VJGZ':81954,'VJGZP':81955,'VLHZ':17257,'VRAO':20958,'VRAOP':20959,'VRSB':16546,'VRSBP':16547,'VSMO':15965,'VSYD':83251,'VSYDP':83252,'VTBR':19043,'VTGK':19632,'VTRS':82886,'VZRZ':17068,'VZRZP':17067,'WTCM':19095,'WTCMP':19096,'YAKG':81917,'YKEN':81766,'YKENP':81769,'YNDX':388383,'YRSB':16342,'YRSBP':16343,'ZHIV':181674,'ZILL':81918,'ZMZN':556,'ZMZNP':603,'ZVEZ':82001}\nFINAM_URL = \"http://export.finam.ru/\"# сервер, на который стучимся\nmarket = 0 #можно не задавать. Это рынок, на котором торгуется бумага. Для акций работает с любой цифрой. Другие рынки не проверял.\n\n\n#Делаем преобразования дат:\nstart_date = datetime.strptime(start, \"%d.%m.%Y\").date()\nstart_date_rev=datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')\nend_date = datetime.strptime(end, \"%d.%m.%Y\").date()\nend_date_rev=datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')\n\n\nfor ticker in tickers:\n params = urlencode([\n\t\t\t\t\t('market', market), #на каком рынке торгуется бумага\n\t\t\t\t\t('em', tickers[ticker]), #вытягиваем цифровой символ, который соответствует бумаге.\n\t\t\t\t\t('code', ticker), #тикер нашей акции\n\t\t\t\t\t('apply',0), #не нашёл что это значит. \n\t\t\t\t\t('df', start_date.day), #Начальная дата, номер дня (1-31)\n\t\t\t\t\t('mf', start_date.month - 1), #Начальная дата, номер месяца (0-11)\n\t\t\t\t\t('yf', start_date.year), #Начальная дата, год\n\t\t\t\t\t('from', start_date), #Начальная дата полностью\n\t\t\t\t\t('dt', end_date.day), #Конечная дата, номер дня\t\n\t\t\t\t\t('mt', end_date.month - 1), #Конечная дата, номер месяца\n\t\t\t\t\t('yt', end_date.year), #Конечная дата, год\n\t\t\t\t\t('to', end_date), #Конечная дата\n\t\t\t\t\t('p', period), #Таймфрейм\n\t\t\t\t\t('f', ticker+\"_\" + start_date_rev + \"_\" + end_date_rev), #Имя сформированного файла\n\t\t\t\t\t('e', \".csv\"), #Расширение сформированного файла\n\t\t\t\t\t('cn', ticker), #ещё раз тикер акции\t\n\t\t\t\t\t('dtf', 1), #В каком формате брать даты. Выбор из 5 возможных. См. страницу https://www.finam.ru/profile/moex-akcii/sberbank/export/\n\t\t\t\t\t('tmf', 1), #В каком формате брать время. Выбор из 4 возможных.\n\t\t\t\t\t('MSOR', 0), #Время свечи (0 - open; 1 - close)\t\n\t\t\t\t\t('mstime', \"on\"), #Московское время\t\n\t\t\t\t\t('mstimever', 1), #Коррекция часового пояса\t\n\t\t\t\t\t('sep', 1), #Разделитель полей\t(1 - запятая, 2 - точка, 3 - точка с запятой, 4 - табуляция, 5 - пробел)\n\t\t\t\t\t('sep2', 1), #Разделитель разрядов\n\t\t\t\t\t('datf', 1), #Формат записи в файл. 
Выбор из 6 возможных.\n\t\t\t\t\t('at', 1)]) #Нужны ли заголовки столбцов\n url = FINAM_URL + ticker+\"_\" + start_date_rev + \"_\" + end_date_rev + \".csv?\" + params #урл составлен!\n print(\"Стучимся на Финам по ссылке: \"+url)\n ##!txt=urlopen(url).readlines() #здесь лежит огромный массив данных, прилетевший с Финама.\n txt=urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines() #здесь лежит огромный массив данных, прилетевший с Финама.\n local_file = open(f'{ticker}_{start}_{end}.txt', \"w\") #задаём файл, в который запишем котировки.\n for line in txt: #записываем свечи строку за строкой. \n\t local_file.write(line.strip().decode( \"utf-8\" )+'\\n')\t\n local_file.close()\n print(\"Готово. Проверьте файл quotes.txt в папке где лежит скрипт\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
c.execute(
"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID "
)
<|reserved_special_token_0|>
print(thrillRidesVisitsResult)
<|reserved_special_token_0|>
c.close()
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=
thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db_filename = 'readonly/dinofunworld.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute(
"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID "
)
thrillRidesVisitsResult = c.fetchall()
print(thrillRidesVisitsResult)
thrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult,
columns=['ride_name', 'visits_count'])
c.close()
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=
thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show()
<|reserved_special_token_1|>
import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
db_filename = 'readonly/dinofunworld.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute(
"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID "
)
thrillRidesVisitsResult = c.fetchall()
print(thrillRidesVisitsResult)
thrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult,
columns=['ride_name', 'visits_count'])
c.close()
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=
thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show()
<|reserved_special_token_1|>
import sqlite3
import pandas as pd
#%matplotlib inline
import matplotlib.pyplot as plt
db_filename = 'readonly/dinofunworld.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
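# Count check-ins per attraction, restricted to the 'Thrill Rides' category.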
c.execute("SELECT a.Name, count(c.visitorID) \
FROM attraction as a, checkin c \
WHERE \
a.AttractionID = c.attraction \
AND a.Category like 'Thrill Rides%' \
GROUP BY a.AttractionID \
")
thrillRidesVisitsResult = c.fetchall()
print(thrillRidesVisitsResult)
thrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult, columns=['ride_name', 'visits_count'])
c.close()
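# Share of thrill-ride check-ins per ride, shown as a pie chart.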
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show()
|
flexible
|
{
"blob_id": "c19c3f580d7555379bd7e077b0264a3784179e93",
"index": 696,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nc.execute(\n \"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID \"\n )\n<mask token>\nprint(thrillRidesVisitsResult)\n<mask token>\nc.close()\nplt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=\n thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)\nplt.axis('equal')\nplt.show()\n",
"step-3": "<mask token>\ndb_filename = 'readonly/dinofunworld.db'\nconn = sqlite3.connect(db_filename)\nc = conn.cursor()\nc.execute(\n \"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID \"\n )\nthrillRidesVisitsResult = c.fetchall()\nprint(thrillRidesVisitsResult)\nthrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult,\n columns=['ride_name', 'visits_count'])\nc.close()\nplt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=\n thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)\nplt.axis('equal')\nplt.show()\n",
"step-4": "import sqlite3\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndb_filename = 'readonly/dinofunworld.db'\nconn = sqlite3.connect(db_filename)\nc = conn.cursor()\nc.execute(\n \"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID \"\n )\nthrillRidesVisitsResult = c.fetchall()\nprint(thrillRidesVisitsResult)\nthrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult,\n columns=['ride_name', 'visits_count'])\nc.close()\nplt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=\n thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)\nplt.axis('equal')\nplt.show()\n",
"step-5": "import sqlite3\nimport pandas as pd\n#%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndb_filename = 'readonly/dinofunworld.db'\nconn = sqlite3.connect(db_filename)\nc = conn.cursor()\nc.execute(\"SELECT a.Name, count(c.visitorID) \\\nFROM attraction as a, checkin c \\\nWHERE \\\na.AttractionID = c.attraction \\\nAND a.Category like 'Thrill Rides%' \\\nGROUP BY a.AttractionID \\\n\")\nthrillRidesVisitsResult = c.fetchall()\nprint(thrillRidesVisitsResult)\nthrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult, columns=['ride_name', 'visits_count'])\nc.close()\nplt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)\nplt.axis('equal')\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .exec_generator import *
|
normal
|
{
"blob_id": "b6ee3c980357ab22a7969c21207b34546c87092d",
"index": 7305,
"step-1": "<mask token>\n",
"step-2": "from .exec_generator import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def normalize_for_url(text: str) ->str:
""" Takes the given text and makes it fit to be used for an url.
That means replacing spaces and other unwanted characters with '-',
lowercasing everything and turning unicode characters into their closest
ascii equivalent using Unidecode.
See https://pypi.python.org/pypi/Unidecode
"""
text = text.replace('ü', 'ue')
text = text.replace('ä', 'ae')
text = text.replace('ö', 'oe')
clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())
clean = _double_dash.sub('-', clean)
clean = clean.strip('-')
return clean
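# Illustrative sketch (not part of the original module): the umlaut
# replacements run before unidecode, so Swiss-German transliterations like
# 'ue' are preserved. Assumes _unwanted_url_chars matches punctuation and
# whitespace, as its name suggests.
def _demo_normalize_for_url() -> None:
    assert normalize_for_url('Zürich') == 'zuerich'
    assert normalize_for_url('Hello, World!') == 'hello-world'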
def increment_name(name: str) ->str:
""" Takes the given name and adds a numbered suffix beginning at 1.
For example::
foo => foo-1
foo-1 => foo-2
"""
match = _number_suffix.search(name)
if match:
number_str = match.group(1)
next_number = int(number_str) + 1
return f'{name[:-len(number_str)]}{next_number}'
else:
return f'{name}-1'
def remove_repeated_spaces(text: str) ->str:
""" Removes repeated spaces in the text ('a b' -> 'a b'). """
return _repeated_spaces.sub(' ', text)
@contextmanager
def profile(filename: str) ->'Iterator[None]':
""" Profiles the wrapped code and stores the result in the profiles folder
with the given filename.
"""
profiler = Profile()
profiler.enable()
yield
profiler.disable()
profiler.create_stats()
profiler.dump_stats('profiles/{}'.format(filename))
@contextmanager
def timing(name: (str | None)=None) ->'Iterator[None]':
""" Runs the wrapped code and prints the time in ms it took to run it.
The name is printed in front of the time, if given.
"""
start = perf_counter()
yield
duration_ms = 1000.0 * (perf_counter() - start)
if name:
print(f'{name}: {duration_ms:.0f} ms')
else:
print(f'{duration_ms:.0f} ms')
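# Minimal usage sketch for the timing context manager above; the block's
# wall-clock duration is printed with the given label.
def _demo_timing() -> None:
    with timing('sum of squares'):
        total = sum(i * i for i in range(100_000))
    assert total > 0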
<|reserved_special_token_0|>
class Bunch:
""" A simple but handy "collector of a bunch of named stuff" class.
See `<https://code.activestate.com/recipes/52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.
For example::
point = Bunch(x=1, y=2)
assert point.x == 1
assert point.y == 2
point.z = 3
assert point.z == 3
Allows the creation of simple nested bunches, for example::
request = Bunch(**{'app.settings.org.my_setting': True})
assert request.app.settings.org.my_setting is True
"""
def __init__(self, **kwargs: Any):
self.__dict__.update((key, value) for key, value in kwargs.items() if
'.' not in key)
for key, value in kwargs.items():
if '.' in key:
name, _, key = key.partition('.')
setattr(self, name, Bunch(**{key: value}))
if TYPE_CHECKING:
def __getattr__(self, name: str) ->Any:
...
def __setattr__(self, name: str, value: Any) ->None:
...
def __delattr__(self, name: str) ->None:
...
def __eq__(self, other: object) ->bool:
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other: object) ->bool:
return not self.__eq__(other)
<|reserved_special_token_0|>
def hash_dictionary(dictionary: dict[str, Any]) ->str:
""" Computes a sha256 hash for the given dictionary. The dictionary
is expected to only contain values that can be serialized by json.
That includes int, decimal, string, boolean.
Note that this function is not meant to be used for hashing secrets. Do
not include data in this dictionary that is secret!
"""
dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')
return hashlib.new('sha1', dict_as_string, usedforsecurity=False
).hexdigest()
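# Sketch: json.dumps is called with sort_keys=True, so key order does not
# affect the digest (which is sha1, per the call above).
def _demo_hash_dictionary() -> None:
    assert hash_dictionary({'x': 1, 'y': 2}) == hash_dictionary({'y': 2, 'x': 1})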
<|reserved_special_token_0|>
def linkify_phone(text: str) ->str:
""" Takes a string and replaces valid phone numbers with html links. If a
phone number is matched, it will be replaced by the result of a callback
function, that does further checks on the regex match. If these checks do
not pass, the matched number will remain unchanged.
"""
def strip_whitespace(number: str) ->str:
return re.sub('\\s', '', number)
def is_valid_length(number: str) ->bool:
if number.startswith('+00'):
return False
if number.startswith('00'):
return len(number) == 13
elif number.startswith('0'):
return len(number) == 10
elif number.startswith('+'):
return len(number) == 12
return False
def handle_match(match: 'Match[str]') ->str:
inside_html = match.group(1)
number = f'{match.group(2)}{match.group(3)}'
assert not number.endswith('\n')
if inside_html:
return match.group(0)
if is_valid_length(strip_whitespace(number)):
number = remove_repeated_spaces(number).strip()
return f'<a href="tel:{number}">{number}</a> '
return match.group(0)
return _phone_ch_html_safe.sub(handle_match, text)
def linkify(text: str, escape: bool=True) ->str:
""" Takes plain text and injects html links for urls and email addresses.
By default the text is html escaped before it is linkified. This accounts
for the fact that we usually use this for text blocks that we mean to
extend with email addresses and urls.
If html is already possible, why linkify it?
Note: We need to clean the html after we've created it (linkify
parses escaped html and turns it into real html). As a consequence it
is possible to have html urls in the text that won't be escaped.
"""
if not text:
return text
long_top_level_domains = ['.agency']
if any(domain in text for domain in long_top_level_domains):
if '@' in text:
linkified = str(Markup('<a href="mailto:{text}">{text}</a>').
format(text=text))
else:
linkified = str(Markup('<a href="{text}">{text}</a>').format(
text=text))
else:
linkified = linkify_phone(bleach.linkify(text, parse_email=True))
if not escape:
return linkified
return bleach.clean(linkified, tags=['a'], attributes={'a': ['href',
'rel']}, protocols=['http', 'https', 'mailto', 'tel'])
def paragraphify(text: str) ->str:
""" Takes a text with newlines groups them into paragraphs according to the
following rules:
If there's a single newline between two lines, a <br> will replace that
newline.
If there are multiple newlines between two lines, each line will become
a paragraph and the extra newlines are discarded.
"""
text = text and text.replace('\r', '').strip('\n')
if not text:
return ''
return ''.join(f'<p>{p}</p>' for p in (p.replace('\n', '<br>') for p in
_multiple_newlines.split(text)))
def to_html_ul(value: str, convert_dashes: bool=True, with_title: bool=False
) ->str:
""" Linkify and convert to text to one or multiple ul's or paragraphs.
"""
if not value:
return ''
value = value.replace('\r', '').strip('\n')
value = value.replace('\n\n', '\n \n')
if not convert_dashes:
return '<p>{}</p>'.format('<br>'.join(linkify(value).splitlines()))
elements = []
temp: list[str] = []
def ul(inner: str) ->str:
return f'<ul class="bulleted">{inner}</ul>'
def li(inner: str) ->str:
return f'<li>{inner}</li>'
def p(inner: str) ->str:
return f'<p>{inner}</p>'
was_list = False
for i, line in enumerate(value.splitlines()):
if not line:
continue
line = linkify(line)
is_list = line.startswith('-')
new_p_or_ul = True if line == ' ' else False
line = line.lstrip('-').strip()
if with_title:
elements.append(p(f'<span class="title">{line}</span>'))
with_title = False
else:
if new_p_or_ul or was_list != is_list and i > 0:
elements.append(ul(''.join(temp)) if was_list else p('<br>'
.join(temp)))
temp = []
was_list = False
if not new_p_or_ul:
temp.append(li(line) if is_list else line)
new_p_or_ul = False
was_list = is_list
if temp:
elements.append(ul(''.join(temp)) if was_list else p('<br>'.join(temp))
)
return ''.join(elements)
def ensure_scheme(url: str, default: str='http') ->str:
""" Makes sure that the given url has a scheme in front, if none
was provided.
"""
if not url:
return url
if '//' not in url:
url = '//' + url
_url = URL(url)
if _url.scheme():
return url
return _url.scheme(default).as_string()
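# Sketch of the expected behaviour, assuming purl's URL round-trips an
# input that already carries a scheme.
def _demo_ensure_scheme() -> None:
    assert ensure_scheme('example.org') == 'http://example.org'
    assert ensure_scheme('https://example.org') == 'https://example.org'
    assert ensure_scheme('example.org', default='https') == 'https://example.org'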
def is_uuid(value: (str | UUID)) ->bool:
""" Returns true if the given value is a uuid. The value may be a string
or of type UUID. If it's a string, the uuid is checked with a regex.
"""
if isinstance(value, str):
return _uuid.match(str(value)) and True or False
return isinstance(value, UUID)
<|reserved_special_token_0|>
def is_subpath(directory: str, path: str) ->bool:
""" Returns true if the given path is inside the given directory. """
directory = os.path.join(os.path.realpath(directory), '')
path = os.path.realpath(path)
return os.path.commonprefix([path, directory]) == directory
@overload
def is_sorted(iterable: 'Iterable[SupportsRichComparison]', key:
'Callable[[SupportsRichComparison], SupportsRichComparison]'=...,
reverse: bool=...) ->bool:
...
@overload
def is_sorted(iterable: 'Iterable[_T]', key:
'Callable[[_T], SupportsRichComparison]', reverse: bool=...) ->bool:
...
<|reserved_special_token_0|>
def morepath_modules(cls: type[morepath.App]) ->'Iterator[str]':
""" Returns all morepath modules which should be scanned for the given
morepath application class.
We can't reliably know the actual morepath modules that
need to be scanned, which is why we assume that each module has
one namespace (like 'more.transaction' or 'onegov.core').
"""
for base in cls.__mro__:
if not issubclass(base, morepath.App):
continue
if base is morepath.App:
continue
module = '.'.join(base.__module__.split('.')[:2])
if module.startswith('test'):
continue
yield module
<|reserved_special_token_0|>
def get_unique_hstore_keys(session: 'Session', column: 'Column[dict[str, Any]]'
) ->set[str]:
""" Returns a set of keys found in an hstore column over all records
of its table.
"""
base = session.query(column.keys()).with_entities(sqlalchemy.func.skeys
(column).label('keys'))
query = sqlalchemy.select([sqlalchemy.func.array_agg(sqlalchemy.column(
'keys'))], distinct=True).select_from(base.subquery())
keys = session.execute(query).scalar()
return set(keys) if keys else set()
def makeopendir(fs: 'FS', directory: str) ->'SubFS[FS]':
""" Creates and opens the given directory in the given PyFilesystem. """
if not fs.isdir(directory):
fs.makedir(directory)
return fs.opendir(directory)
def append_query_param(url: str, key: str, value: str) ->str:
""" Appends a single query parameter to an url. This is faster than
using Purl, if and only if we only add one query param.
Also this function assumes that the value is already url encoded.
"""
template = '?' in url and '{}&{}={}' or '{}?{}={}'
return template.format(url, key, value)
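# Straightforward sketch: the helper only formats strings, so the caller
# must url-encode the value beforehand.
def _demo_append_query_param() -> None:
    assert append_query_param('https://x.test/a', 'page', '2') == 'https://x.test/a?page=2'
    assert append_query_param('https://x.test/a?x=1', 'page', '2') == 'https://x.test/a?x=1&page=2'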
class PostThread(Thread):
""" POSTs the given data with the headers to the URL.
Example::
data = {'a': 1, 'b': 2}
data = json.dumps(data).encode('utf-8')
PostThread(
'https://example.com/post',
data,
(
('Content-Type', 'application/json; charset=utf-8'),
('Content-Length', len(data))
)
).start()
This only works for external URLs! If posting to the server itself is
needed, use a process instead of the thread!
"""
def __init__(self, url: str, data: bytes, headers:
'Collection[tuple[str, str]]', timeout: float=30):
Thread.__init__(self)
self.url = url
self.data = data
self.headers = headers
self.timeout = timeout
def run(self) ->None:
try:
if not self.url.lower().startswith('http'):
raise ValueError from None
request = urllib.request.Request(self.url)
for header in self.headers:
request.add_header(header[0], header[1])
urllib.request.urlopen(request, self.data, self.timeout)
except Exception as e:
log.error('Error while sending a POST request to {}: {}'.format
(self.url, str(e)))
<|reserved_special_token_0|>
def dictionary_to_binary(dictionary: 'LaxFileDict') ->bytes:
""" Takes a dictionary created by :func:`binary_to_dictionary` and returns
the original binary data.
"""
data = base64.b64decode(dictionary['data'])
with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:
return f.read()
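# Round-trip sketch: we hand-build a record in the shape produced by
# binary_to_dictionary (elided above), i.e. base64-encoded gzip data.
def _demo_dictionary_to_binary() -> None:
    import base64
    import gzip
    record = {'data': base64.b64encode(gzip.compress(b'payload'))}
    assert dictionary_to_binary(record) == b'payload'  # type: ignore[arg-type]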
@overload
def safe_format(format: str, dictionary: dict[str, str | int | float],
types: None=..., adapt: 'Callable[[str], str] | None'=...,
raise_on_missing: bool=...) ->str:
...
<|reserved_special_token_0|>
def is_valid_yubikey(client_id: str, secret_key: str, expected_yubikey_id:
str, yubikey: str) ->bool:
""" Asks the yubico validation servers if the given yubikey OTP is valid.
:client_id:
The yubico API client id.
:secret_key:
The yubico API secret key.
:expected_yubikey_id:
The expected yubikey id. The yubikey id is defined as the first twelve
characters of any yubikey value. Each user should have a yubikey
associated with its account. If the yubikey value comes from a
different key, the key is invalid.
:yubikey:
The actual yubikey value that should be verified.
:return: True if yubico confirmed the validity of the key.
"""
assert client_id and secret_key and expected_yubikey_id and yubikey
assert len(expected_yubikey_id) == 12
if not yubikey.startswith(expected_yubikey_id):
return False
try:
return Yubico(client_id, secret_key).verify(yubikey)
except StatusCodeError as e:
if e.status_code != 'REPLAYED_OTP':
raise e
return False
except SignatureVerificationError:
return False
<|reserved_special_token_0|>
def yubikey_otp_to_serial(otp: str) ->(int | None):
""" Takes a Yubikey OTP and calculates the serial number of the key.
The serial number is printed on the yubikey, in decimal and as a QR code.
Example:
>>> yubikey_otp_to_serial(
'ccccccdefghdefghdefghdefghdefghdefghdefghklv')
2311522
Adapted from Java:
https://github.com/Yubico/yubikey-salesforce-client/blob/
e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls
If the key cannot be calculated, None is returned. This can happen if
the key is malformed.
"""
if not is_valid_yubikey_format(otp):
return None
token = 'cccc' + otp[:12]
toggle = False
keep = 0
bytesarray = []
for char in token:
n = ALPHABET.index(char)
toggle = not toggle
if toggle:
keep = n
else:
bytesarray.append(keep << 4 | n)
value = 0
mask_value = 31
for i in range(0, 8):
shift = (4 - 1 - i) * 8
value += (bytesarray[i] & 255) << (shift & mask_value)
return value
<|reserved_special_token_0|>
def dict_path(dictionary: dict[str, _T], path: str) ->_T:
""" Gets the value of the given dictionary at the given path. For example:
>>> data = {'foo': {'bar': True}}
>>> dict_path(data, 'foo.bar')
True
"""
if not dictionary:
raise KeyError()
return reduce(operator.getitem, path.split('.'), dictionary)
<|reserved_special_token_0|>
@overload
def batched(iterable: Iterable[_T], batch_size: int, container_factory:
'type[list]') ->'Iterator[list[_T]]':
...
@overload
def batched(iterable: Iterable[_T], batch_size: int, container_factory:
'Callable[[Iterator[_T]], Collection[_T]]') ->'Iterator[Collection[_T]]':
...
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def local_lock(namespace: str, key: str) ->'Iterator[None]':
""" Locks the given namespace/key combination on the current system,
automatically freeing it after the with statement has been completed or
once the process is killed.
Usage::
with local_lock('namespace', 'key'):
pass
"""
name = f'{namespace}-{key}'.replace('/', '-')
with open(f'/tmp/{name}', 'w+') as f:
try:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
yield
fcntl.flock(f, fcntl.LOCK_UN)
except BlockingIOError as exception:
raise AlreadyLockedError from exception
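# Usage sketch: flock treats each open file description independently, so a
# second acquisition of the same namespace/key - even from the same
# process - raises AlreadyLockedError while the first is held.
def _demo_local_lock() -> None:
    with local_lock('demo', 'resource'):
        try:
            with local_lock('demo', 'resource'):
                raise AssertionError('lock should already be held')
        except AlreadyLockedError:
            pass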
def normalize_for_url(text: str) ->str:
""" Takes the given text and makes it fit to be used for an url.
That means replacing spaces and other unwanted characters with '-',
lowercasing everything and turning unicode characters into their closest
ascii equivalent using Unidecode.
See https://pypi.python.org/pypi/Unidecode
"""
text = text.replace('ü', 'ue')
text = text.replace('ä', 'ae')
text = text.replace('ö', 'oe')
clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())
clean = _double_dash.sub('-', clean)
clean = clean.strip('-')
return clean
def increment_name(name: str) ->str:
""" Takes the given name and adds a numbered suffix beginning at 1.
For example::
foo => foo-1
foo-1 => foo-2
"""
match = _number_suffix.search(name)
if match:
number_str = match.group(1)
next_number = int(number_str) + 1
return f'{name[:-len(number_str)]}{next_number}'
else:
return f'{name}-1'
def remove_repeated_spaces(text: str) ->str:
""" Removes repeated spaces in the text ('a b' -> 'a b'). """
return _repeated_spaces.sub(' ', text)
@contextmanager
def profile(filename: str) ->'Iterator[None]':
""" Profiles the wrapped code and stores the result in the profiles folder
with the given filename.
"""
profiler = Profile()
profiler.enable()
yield
profiler.disable()
profiler.create_stats()
profiler.dump_stats('profiles/{}'.format(filename))
@contextmanager
def timing(name: (str | None)=None) ->'Iterator[None]':
""" Runs the wrapped code and prints the time in ms it took to run it.
The name is printed in front of the time, if given.
"""
start = perf_counter()
yield
duration_ms = 1000.0 * (perf_counter() - start)
if name:
print(f'{name}: {duration_ms:.0f} ms')
else:
print(f'{duration_ms:.0f} ms')
<|reserved_special_token_0|>
class Bunch:
""" A simple but handy "collector of a bunch of named stuff" class.
See `<https://code.activestate.com/recipes/52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.
For example::
point = Bunch(x=1, y=2)
assert point.x == 1
assert point.y == 2
point.z = 3
assert point.z == 3
Allows the creation of simple nested bunches, for example::
request = Bunch(**{'app.settings.org.my_setting': True})
assert request.app.settings.org.my_setting is True
"""
def __init__(self, **kwargs: Any):
self.__dict__.update((key, value) for key, value in kwargs.items() if
'.' not in key)
for key, value in kwargs.items():
if '.' in key:
name, _, key = key.partition('.')
setattr(self, name, Bunch(**{key: value}))
if TYPE_CHECKING:
def __getattr__(self, name: str) ->Any:
...
def __setattr__(self, name: str, value: Any) ->None:
...
def __delattr__(self, name: str) ->None:
...
def __eq__(self, other: object) ->bool:
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other: object) ->bool:
return not self.__eq__(other)
<|reserved_special_token_0|>
def hash_dictionary(dictionary: dict[str, Any]) ->str:
""" Computes a sha256 hash for the given dictionary. The dictionary
is expected to only contain values that can be serialized by json.
That includes int, decimal, string, boolean.
Note that this function is not meant to be used for hashing secrets. Do
not include data in this dictionary that is secret!
"""
dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')
return hashlib.new('sha1', dict_as_string, usedforsecurity=False
).hexdigest()
<|reserved_special_token_0|>
def linkify_phone(text: str) ->str:
""" Takes a string and replaces valid phone numbers with html links. If a
phone number is matched, it will be replaced by the result of a callback
function, that does further checks on the regex match. If these checks do
not pass, the matched number will remain unchanged.
"""
def strip_whitespace(number: str) ->str:
return re.sub('\\s', '', number)
def is_valid_length(number: str) ->bool:
if number.startswith('+00'):
return False
if number.startswith('00'):
return len(number) == 13
elif number.startswith('0'):
return len(number) == 10
elif number.startswith('+'):
return len(number) == 12
return False
def handle_match(match: 'Match[str]') ->str:
inside_html = match.group(1)
number = f'{match.group(2)}{match.group(3)}'
assert not number.endswith('\n')
if inside_html:
return match.group(0)
if is_valid_length(strip_whitespace(number)):
number = remove_repeated_spaces(number).strip()
return f'<a href="tel:{number}">{number}</a> '
return match.group(0)
return _phone_ch_html_safe.sub(handle_match, text)
def linkify(text: str, escape: bool=True) ->str:
""" Takes plain text and injects html links for urls and email addresses.
By default the text is html escaped before it is linkified. This accounts
for the fact that we usually use this for text blocks that we mean to
extend with email addresses and urls.
If html is already possible, why linkify it?
Note: We need to clean the html after we've created it (linkify
parses escaped html and turns it into real html). As a consequence it
is possible to have html urls in the text that won't be escaped.
"""
if not text:
return text
long_top_level_domains = ['.agency']
if any(domain in text for domain in long_top_level_domains):
if '@' in text:
linkified = str(Markup('<a href="mailto:{text}">{text}</a>').
format(text=text))
else:
linkified = str(Markup('<a href="{text}">{text}</a>').format(
text=text))
else:
linkified = linkify_phone(bleach.linkify(text, parse_email=True))
if not escape:
return linkified
return bleach.clean(linkified, tags=['a'], attributes={'a': ['href',
'rel']}, protocols=['http', 'https', 'mailto', 'tel'])
def paragraphify(text: str) ->str:
""" Takes a text with newlines groups them into paragraphs according to the
following rules:
If there's a single newline between two lines, a <br> will replace that
newline.
If there are multiple newlines between two lines, each line will become
a paragraph and the extra newlines are discarded.
"""
text = text and text.replace('\r', '').strip('\n')
if not text:
return ''
return ''.join(f'<p>{p}</p>' for p in (p.replace('\n', '<br>') for p in
_multiple_newlines.split(text)))
def to_html_ul(value: str, convert_dashes: bool=True, with_title: bool=False
) ->str:
""" Linkify and convert to text to one or multiple ul's or paragraphs.
"""
if not value:
return ''
value = value.replace('\r', '').strip('\n')
value = value.replace('\n\n', '\n \n')
if not convert_dashes:
return '<p>{}</p>'.format('<br>'.join(linkify(value).splitlines()))
elements = []
temp: list[str] = []
def ul(inner: str) ->str:
return f'<ul class="bulleted">{inner}</ul>'
def li(inner: str) ->str:
return f'<li>{inner}</li>'
def p(inner: str) ->str:
return f'<p>{inner}</p>'
was_list = False
for i, line in enumerate(value.splitlines()):
if not line:
continue
line = linkify(line)
is_list = line.startswith('-')
new_p_or_ul = True if line == ' ' else False
line = line.lstrip('-').strip()
if with_title:
elements.append(p(f'<span class="title">{line}</span>'))
with_title = False
else:
if new_p_or_ul or was_list != is_list and i > 0:
elements.append(ul(''.join(temp)) if was_list else p('<br>'
.join(temp)))
temp = []
was_list = False
if not new_p_or_ul:
temp.append(li(line) if is_list else line)
new_p_or_ul = False
was_list = is_list
if temp:
elements.append(ul(''.join(temp)) if was_list else p('<br>'.join(temp))
)
return ''.join(elements)
def ensure_scheme(url: str, default: str='http') ->str:
""" Makes sure that the given url has a scheme in front, if none
was provided.
"""
if not url:
return url
if '//' not in url:
url = '//' + url
_url = URL(url)
if _url.scheme():
return url
return _url.scheme(default).as_string()
def is_uuid(value: (str | UUID)) ->bool:
""" Returns true if the given value is a uuid. The value may be a string
or of type UUID. If it's a string, the uuid is checked with a regex.
"""
if isinstance(value, str):
return _uuid.match(str(value)) and True or False
return isinstance(value, UUID)
<|reserved_special_token_0|>
def relative_url(absolute_url: (str | None)) ->str:
""" Removes everything in front of the path, including scheme, host,
username, password and port.
"""
url = URL._mutate(URL(absolute_url), scheme=None, username=None,
password=None, host=None, port=None)
return url.as_string()
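# Sketch, assuming purl keeps path, query and fragment intact once the
# scheme, credentials, host and port have been cleared.
def _demo_relative_url() -> None:
    assert relative_url('https://u:[email protected]:8443/docs?page=2') == '/docs?page=2'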
def is_subpath(directory: str, path: str) ->bool:
""" Returns true if the given path is inside the given directory. """
directory = os.path.join(os.path.realpath(directory), '')
path = os.path.realpath(path)
return os.path.commonprefix([path, directory]) == directory
@overload
def is_sorted(iterable: 'Iterable[SupportsRichComparison]', key:
'Callable[[SupportsRichComparison], SupportsRichComparison]'=...,
reverse: bool=...) ->bool:
...
@overload
def is_sorted(iterable: 'Iterable[_T]', key:
'Callable[[_T], SupportsRichComparison]', reverse: bool=...) ->bool:
...
<|reserved_special_token_0|>
def morepath_modules(cls: type[morepath.App]) ->'Iterator[str]':
""" Returns all morepath modules which should be scanned for the given
morepath application class.
We can't reliably know the actual morepath modules that
need to be scanned, which is why we assume that each module has
one namespace (like 'more.transaction' or 'onegov.core').
"""
for base in cls.__mro__:
if not issubclass(base, morepath.App):
continue
if base is morepath.App:
continue
module = '.'.join(base.__module__.split('.')[:2])
if module.startswith('test'):
continue
yield module
<|reserved_special_token_0|>
def get_unique_hstore_keys(session: 'Session', column: 'Column[dict[str, Any]]'
) ->set[str]:
""" Returns a set of keys found in an hstore column over all records
of its table.
"""
base = session.query(column.keys()).with_entities(sqlalchemy.func.skeys
(column).label('keys'))
query = sqlalchemy.select([sqlalchemy.func.array_agg(sqlalchemy.column(
'keys'))], distinct=True).select_from(base.subquery())
keys = session.execute(query).scalar()
return set(keys) if keys else set()
def makeopendir(fs: 'FS', directory: str) ->'SubFS[FS]':
""" Creates and opens the given directory in the given PyFilesystem. """
if not fs.isdir(directory):
fs.makedir(directory)
return fs.opendir(directory)
def append_query_param(url: str, key: str, value: str) ->str:
""" Appends a single query parameter to an url. This is faster than
using Purl, if and only if we only add one query param.
Also this function assumes that the value is already url encoded.
"""
template = '?' in url and '{}&{}={}' or '{}?{}={}'
return template.format(url, key, value)
class PostThread(Thread):
""" POSTs the given data with the headers to the URL.
Example::
data = {'a': 1, 'b': 2}
data = json.dumps(data).encode('utf-8')
PostThread(
'https://example.com/post',
data,
(
('Content-Type', 'application/json; charset=utf-8'),
('Content-Length', len(data))
)
).start()
This only works for external URLs! If posting to the server itself is
needed, use a process instead of the thread!
"""
def __init__(self, url: str, data: bytes, headers:
'Collection[tuple[str, str]]', timeout: float=30):
Thread.__init__(self)
self.url = url
self.data = data
self.headers = headers
self.timeout = timeout
def run(self) ->None:
try:
if not self.url.lower().startswith('http'):
raise ValueError from None
request = urllib.request.Request(self.url)
for header in self.headers:
request.add_header(header[0], header[1])
urllib.request.urlopen(request, self.data, self.timeout)
except Exception as e:
log.error('Error while sending a POST request to {}: {}'.format
(self.url, str(e)))
<|reserved_special_token_0|>
def dictionary_to_binary(dictionary: 'LaxFileDict') ->bytes:
""" Takes a dictionary created by :func:`binary_to_dictionary` and returns
the original binary data.
"""
data = base64.b64decode(dictionary['data'])
with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:
return f.read()
@overload
def safe_format(format: str, dictionary: dict[str, str | int | float],
types: None=..., adapt: 'Callable[[str], str] | None'=...,
raise_on_missing: bool=...) ->str:
...
<|reserved_special_token_0|>
def is_valid_yubikey(client_id: str, secret_key: str, expected_yubikey_id:
str, yubikey: str) ->bool:
""" Asks the yubico validation servers if the given yubikey OTP is valid.
:client_id:
The yubico API client id.
:secret_key:
The yubico API secret key.
:expected_yubikey_id:
The expected yubikey id. The yubikey id is defined as the first twelve
characters of any yubikey value. Each user should have a yubikey
associated with its account. If the yubikey value comes from a
different key, the key is invalid.
:yubikey:
The actual yubikey value that should be verified.
:return: True if yubico confirmed the validity of the key.
"""
assert client_id and secret_key and expected_yubikey_id and yubikey
assert len(expected_yubikey_id) == 12
if not yubikey.startswith(expected_yubikey_id):
return False
try:
return Yubico(client_id, secret_key).verify(yubikey)
except StatusCodeError as e:
if e.status_code != 'REPLAYED_OTP':
raise e
return False
except SignatureVerificationError:
return False
<|reserved_special_token_0|>
def yubikey_otp_to_serial(otp: str) ->(int | None):
""" Takes a Yubikey OTP and calculates the serial number of the key.
The serial number is printed on the yubikey, in decimal and as a QR code.
Example:
>>> yubikey_otp_to_serial(
'ccccccdefghdefghdefghdefghdefghdefghdefghklv')
2311522
Adapted from Java:
https://github.com/Yubico/yubikey-salesforce-client/blob/
e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls
If the key cannot be calculated, None is returned. This can happen if
the key is malformed.
"""
if not is_valid_yubikey_format(otp):
return None
token = 'cccc' + otp[:12]
toggle = False
keep = 0
bytesarray = []
for char in token:
n = ALPHABET.index(char)
toggle = not toggle
if toggle:
keep = n
else:
bytesarray.append(keep << 4 | n)
value = 0
mask_value = 31
for i in range(0, 8):
shift = (4 - 1 - i) * 8
value += (bytesarray[i] & 255) << (shift & mask_value)
return value
<|reserved_special_token_0|>
def dict_path(dictionary: dict[str, _T], path: str) ->_T:
""" Gets the value of the given dictionary at the given path. For example:
>>> data = {'foo': {'bar': True}}
>>> dict_path(data, 'foo.bar')
True
"""
if not dictionary:
raise KeyError()
return reduce(operator.getitem, path.split('.'), dictionary)
<|reserved_special_token_0|>
@overload
def batched(iterable: Iterable[_T], batch_size: int, container_factory:
'type[list]') ->'Iterator[list[_T]]':
...
@overload
def batched(iterable: Iterable[_T], batch_size: int, container_factory:
'Callable[[Iterator[_T]], Collection[_T]]') ->'Iterator[Collection[_T]]':
...
def batched(iterable: Iterable[_T], batch_size: int, container_factory:
'Callable[[Iterator[_T]], Collection[_T]]'=tuple
) ->'Iterator[Collection[_T]]':
""" Splits an iterable into batches of batch_size and puts them
inside a given collection (tuple by default).
The container_factory is necessary in order to consume the iterator
returned by islice. Otherwise this function would never return.
"""
iterator = iter(iterable)
while True:
batch = container_factory(islice(iterator, batch_size))
if len(batch) == 0:
return
yield batch
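# Sketch: the default container_factory is tuple, the final batch may be
# shorter, and a list factory can be requested instead.
def _demo_batched() -> None:
    assert list(batched(range(5), 2)) == [(0, 1), (2, 3), (4,)]
    assert list(batched(range(4), 2, container_factory=list)) == [[0, 1], [2, 3]]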
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def local_lock(namespace: str, key: str) ->'Iterator[None]':
""" Locks the given namespace/key combination on the current system,
automatically freeing it after the with statement has been completed or
once the process is killed.
Usage::
with local_lock('namespace', 'key'):
pass
"""
name = f'{namespace}-{key}'.replace('/', '-')
with open(f'/tmp/{name}', 'w+') as f:
try:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
yield
fcntl.flock(f, fcntl.LOCK_UN)
except BlockingIOError as exception:
raise AlreadyLockedError from exception
def normalize_for_url(text: str) ->str:
""" Takes the given text and makes it fit to be used for an url.
That means replacing spaces and other unwanted characters with '-',
lowercasing everything and turning unicode characters into their closest
ascii equivalent using Unidecode.
See https://pypi.python.org/pypi/Unidecode
"""
text = text.replace('ü', 'ue')
text = text.replace('ä', 'ae')
text = text.replace('ö', 'oe')
clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())
clean = _double_dash.sub('-', clean)
clean = clean.strip('-')
return clean
def increment_name(name: str) ->str:
""" Takes the given name and adds a numbered suffix beginning at 1.
For example::
foo => foo-1
foo-1 => foo-2
"""
match = _number_suffix.search(name)
if match:
number_str = match.group(1)
next_number = int(number_str) + 1
return f'{name[:-len(number_str)]}{next_number}'
else:
return f'{name}-1'
def remove_repeated_spaces(text: str) ->str:
""" Removes repeated spaces in the text ('a b' -> 'a b'). """
return _repeated_spaces.sub(' ', text)
@contextmanager
def profile(filename: str) ->'Iterator[None]':
""" Profiles the wrapped code and stores the result in the profiles folder
with the given filename.
"""
profiler = Profile()
profiler.enable()
yield
profiler.disable()
profiler.create_stats()
profiler.dump_stats('profiles/{}'.format(filename))
@contextmanager
def timing(name: (str | None)=None) ->'Iterator[None]':
""" Runs the wrapped code and prints the time in ms it took to run it.
The name is printed in front of the time, if given.
"""
start = perf_counter()
yield
duration_ms = 1000.0 * (perf_counter() - start)
if name:
print(f'{name}: {duration_ms:.0f} ms')
else:
print(f'{duration_ms:.0f} ms')
<|reserved_special_token_0|>
def module_path(module: 'ModuleType | str', subpath: str) ->str:
""" Returns a subdirectory in the given python module.
:mod:
A python module (actual module or string)
:subpath:
Subpath below that python module. Leading slashes ('/') are ignored.
"""
parent = module_path_root(module)
path = os.path.join(parent, subpath.strip('/'))
assert is_subpath(parent, path)
return path
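# Usage sketch, assuming module_path_root (elided above) resolves a module
# to its directory on disk; any stdlib package serves for illustration.
def _demo_module_path() -> None:
    import email
    path = module_path(email, '/mime')
    assert path.endswith('mime')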
<|reserved_special_token_0|>
class Bunch:
""" A simple but handy "collector of a bunch of named stuff" class.
See `<https://code.activestate.com/recipes/52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.
For example::
point = Bunch(x=1, y=2)
assert point.x == 1
assert point.y == 2
point.z = 3
assert point.z == 3
Allows the creation of simple nested bunches, for example::
request = Bunch(**{'app.settings.org.my_setting': True})
assert request.app.settings.org.my_setting is True
"""
def __init__(self, **kwargs: Any):
self.__dict__.update((key, value) for key, value in kwargs.items() if
'.' not in key)
for key, value in kwargs.items():
if '.' in key:
name, _, key = key.partition('.')
setattr(self, name, Bunch(**{key: value}))
if TYPE_CHECKING:
def __getattr__(self, name: str) ->Any:
...
def __setattr__(self, name: str, value: Any) ->None:
...
def __delattr__(self, name: str) ->None:
...
def __eq__(self, other: object) ->bool:
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other: object) ->bool:
return not self.__eq__(other)
<|reserved_special_token_0|>
def hash_dictionary(dictionary: dict[str, Any]) ->str:
""" Computes a sha256 hash for the given dictionary. The dictionary
is expected to only contain values that can be serialized by json.
That includes int, decimal, string, boolean.
Note that this function is not meant to be used for hashing secrets. Do
not include data in this dictionary that is secret!
"""
dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')
return hashlib.new('sha1', dict_as_string, usedforsecurity=False
).hexdigest()
@overload
def groupbylist(iterable: Iterable[_T], key: None=...) ->list[tuple[_T,
list[_T]]]:
...
<|reserved_special_token_0|>
def groupbylist(iterable: Iterable[_T], key: 'Callable[[_T], Any] | None'=None
) ->list[tuple[Any, list[_T]]]:
""" Works just like Python's ``itertools.groupby`` function, but instead
of returning generators, it returns lists.
"""
return [(k, list(g)) for k, g in groupby(iterable, key=key)]
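# Sketch: unlike itertools.groupby, the groups survive past iteration
# because they are materialised as lists.
def _demo_groupbylist() -> None:
    assert groupbylist([1, 1, 2, 2, 2, 3]) == [(1, [1, 1]), (2, [2, 2, 2]), (3, [3])]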
def linkify_phone(text: str) ->str:
""" Takes a string and replaces valid phone numbers with html links. If a
phone number is matched, it will be replaced by the result of a callback
function, that does further checks on the regex match. If these checks do
not pass, the matched number will remain unchanged.
"""
def strip_whitespace(number: str) ->str:
return re.sub('\\s', '', number)
def is_valid_length(number: str) ->bool:
if number.startswith('+00'):
return False
if number.startswith('00'):
return len(number) == 13
elif number.startswith('0'):
return len(number) == 10
elif number.startswith('+'):
return len(number) == 12
return False
def handle_match(match: 'Match[str]') ->str:
inside_html = match.group(1)
number = f'{match.group(2)}{match.group(3)}'
assert not number.endswith('\n')
if inside_html:
return match.group(0)
if is_valid_length(strip_whitespace(number)):
number = remove_repeated_spaces(number).strip()
return f'<a href="tel:{number}">{number}</a> '
return match.group(0)
return _phone_ch_html_safe.sub(handle_match, text)
def linkify(text: str, escape: bool=True) ->str:
""" Takes plain text and injects html links for urls and email addresses.
By default the text is html escaped before it is linkified. This accounts
for the fact that we usually use this for text blocks that we mean to
extend with email addresses and urls.
If html is already possible, why linkify it?
Note: We need to clean the html after we've created it (linkify
parses escaped html and turns it into real html). As a consequence it
is possible to have html urls in the text that won't be escaped.
"""
if not text:
return text
long_top_level_domains = ['.agency']
if any(domain in text for domain in long_top_level_domains):
if '@' in text:
linkified = str(Markup('<a href="mailto:{text}">{text}</a>').
format(text=text))
else:
linkified = str(Markup('<a href="{text}">{text}</a>').format(
text=text))
else:
linkified = linkify_phone(bleach.linkify(text, parse_email=True))
if not escape:
return linkified
return bleach.clean(linkified, tags=['a'], attributes={'a': ['href',
'rel']}, protocols=['http', 'https', 'mailto', 'tel'])
def paragraphify(text: str) ->str:
""" Takes a text with newlines groups them into paragraphs according to the
following rules:
If there's a single newline between two lines, a <br> will replace that
newline.
If there are multiple newlines between two lines, each line will become
a paragraph and the extra newlines are discarded.
"""
text = text and text.replace('\r', '').strip('\n')
if not text:
return ''
return ''.join(f'<p>{p}</p>' for p in (p.replace('\n', '<br>') for p in
_multiple_newlines.split(text)))
def to_html_ul(value: str, convert_dashes: bool=True, with_title: bool=False
) ->str:
""" Linkify and convert to text to one or multiple ul's or paragraphs.
"""
if not value:
return ''
value = value.replace('\r', '').strip('\n')
value = value.replace('\n\n', '\n \n')
if not convert_dashes:
return '<p>{}</p>'.format('<br>'.join(linkify(value).splitlines()))
elements = []
temp: list[str] = []
def ul(inner: str) ->str:
return f'<ul class="bulleted">{inner}</ul>'
def li(inner: str) ->str:
return f'<li>{inner}</li>'
def p(inner: str) ->str:
return f'<p>{inner}</p>'
was_list = False
for i, line in enumerate(value.splitlines()):
if not line:
continue
line = linkify(line)
is_list = line.startswith('-')
new_p_or_ul = True if line == ' ' else False
line = line.lstrip('-').strip()
if with_title:
elements.append(p(f'<span class="title">{line}</span>'))
with_title = False
else:
if new_p_or_ul or was_list != is_list and i > 0:
elements.append(ul(''.join(temp)) if was_list else p('<br>'
.join(temp)))
temp = []
was_list = False
if not new_p_or_ul:
temp.append(li(line) if is_list else line)
new_p_or_ul = False
was_list = is_list
if temp:
elements.append(ul(''.join(temp)) if was_list else p('<br>'.join(temp))
)
return ''.join(elements)
def ensure_scheme(url: str, default: str='http') ->str:
""" Makes sure that the given url has a scheme in front, if none
was provided.
"""
if not url:
return url
if '//' not in url:
url = '//' + url
_url = URL(url)
if _url.scheme():
return url
return _url.scheme(default).as_string()
def is_uuid(value: (str | UUID)) ->bool:
""" Returns true if the given value is a uuid. The value may be a string
or of type UUID. If it's a string, the uuid is checked with a regex.
"""
if isinstance(value, str):
return _uuid.match(str(value)) and True or False
return isinstance(value, UUID)
<|reserved_special_token_0|>
def relative_url(absolute_url: (str | None)) ->str:
""" Removes everything in front of the path, including scheme, host,
username, password and port.
"""
url = URL._mutate(URL(absolute_url), scheme=None, username=None,
password=None, host=None, port=None)
return url.as_string()
def is_subpath(directory: str, path: str) ->bool:
""" Returns true if the given path is inside the given directory. """
directory = os.path.join(os.path.realpath(directory), '')
path = os.path.realpath(path)
return os.path.commonprefix([path, directory]) == directory
@overload
def is_sorted(iterable: 'Iterable[SupportsRichComparison]', key:
'Callable[[SupportsRichComparison], SupportsRichComparison]'=...,
reverse: bool=...) ->bool:
...
@overload
def is_sorted(iterable: 'Iterable[_T]', key:
'Callable[[_T], SupportsRichComparison]', reverse: bool=...) ->bool:
...
<|reserved_special_token_0|>
def morepath_modules(cls: type[morepath.App]) ->'Iterator[str]':
""" Returns all morepath modules which should be scanned for the given
morepath application class.
We can't reliably know the actual morepath modules that
need to be scanned, which is why we assume that each module has
one namespace (like 'more.transaction' or 'onegov.core').
"""
for base in cls.__mro__:
if not issubclass(base, morepath.App):
continue
if base is morepath.App:
continue
module = '.'.join(base.__module__.split('.')[:2])
if module.startswith('test'):
continue
yield module
def scan_morepath_modules(cls: type[morepath.App]) ->None:
""" Tries to scan all the morepath modules required for the given
application class. This is not guaranteed to stay reliable as there is
no sure way to discover all modules required by the application class.
"""
for module in sorted(morepath_modules(cls)):
morepath.scan(import_module(module))
def get_unique_hstore_keys(session: 'Session', column: 'Column[dict[str, Any]]'
) ->set[str]:
""" Returns a set of keys found in an hstore column over all records
of its table.
"""
base = session.query(column.keys()).with_entities(sqlalchemy.func.skeys
(column).label('keys'))
query = sqlalchemy.select([sqlalchemy.func.array_agg(sqlalchemy.column(
'keys'))], distinct=True).select_from(base.subquery())
keys = session.execute(query).scalar()
return set(keys) if keys else set()
def makeopendir(fs: 'FS', directory: str) ->'SubFS[FS]':
""" Creates and opens the given directory in the given PyFilesystem. """
if not fs.isdir(directory):
fs.makedir(directory)
return fs.opendir(directory)
def append_query_param(url: str, key: str, value: str) ->str:
""" Appends a single query parameter to an url. This is faster than
using Purl, if and only if we only add one query param.
Also this function assumes that the value is already url encoded.
"""
template = '?' in url and '{}&{}={}' or '{}?{}={}'
return template.format(url, key, value)
class PostThread(Thread):
""" POSTs the given data with the headers to the URL.
Example::
data = {'a': 1, 'b': 2}
data = json.dumps(data).encode('utf-8')
PostThread(
'https://example.com/post',
data,
(
('Content-Type', 'application/json; charset=utf-8'),
('Content-Length', len(data))
)
).start()
This only works for external URLs! If posting to the server itself is
needed, use a process instead of the thread!
"""
def __init__(self, url: str, data: bytes, headers:
'Collection[tuple[str, str]]', timeout: float=30):
Thread.__init__(self)
self.url = url
self.data = data
self.headers = headers
self.timeout = timeout
def run(self) ->None:
try:
if not self.url.lower().startswith('http'):
raise ValueError from None
request = urllib.request.Request(self.url)
for header in self.headers:
request.add_header(header[0], header[1])
urllib.request.urlopen(request, self.data, self.timeout)
except Exception as e:
log.error('Error while sending a POST request to {}: {}'.format
(self.url, str(e)))
<|reserved_special_token_0|>
def dictionary_to_binary(dictionary: 'LaxFileDict') ->bytes:
""" Takes a dictionary created by :func:`binary_to_dictionary` and returns
the original binary data.
"""
data = base64.b64decode(dictionary['data'])
with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:
return f.read()
@overload
def safe_format(format: str, dictionary: dict[str, str | int | float],
types: None=..., adapt: 'Callable[[str], str] | None'=...,
raise_on_missing: bool=...) ->str:
...
@overload
def safe_format(format: str, dictionary: dict[str, _T], types: set[type[_T]
]=..., adapt: 'Callable[[str], str] | None'=..., raise_on_missing: bool=...
) ->str:
...
<|reserved_special_token_0|>
def safe_format_keys(format: str, adapt: 'Callable[[str], str] | None'=None
) ->list[str]:
""" Takes a :func:`safe_format` string and returns the found keys. """
keys = []
def adapt_and_record(key: str) ->str:
key = adapt(key) if adapt else key
keys.append(key)
return key
safe_format(format, {}, adapt=adapt_and_record)
return keys
def is_valid_yubikey(client_id: str, secret_key: str, expected_yubikey_id:
str, yubikey: str) ->bool:
""" Asks the yubico validation servers if the given yubikey OTP is valid.
:client_id:
The yubico API client id.
:secret_key:
The yubico API secret key.
:expected_yubikey_id:
The expected yubikey id. The yubikey id is defined as the first twelve
characters of any yubikey value. Each user should have a yubikey
associated with its account. If the yubikey value comes from a
different key, the key is invalid.
:yubikey:
The actual yubikey value that should be verified.
:return: True if yubico confirmed the validity of the key.
"""
assert client_id and secret_key and expected_yubikey_id and yubikey
assert len(expected_yubikey_id) == 12
if not yubikey.startswith(expected_yubikey_id):
return False
try:
return Yubico(client_id, secret_key).verify(yubikey)
except StatusCodeError as e:
if e.status_code != 'REPLAYED_OTP':
raise e
return False
except SignatureVerificationError:
return False
def is_valid_yubikey_format(otp: str) ->bool:
""" Returns True if the given OTP has the correct format. Does not actually
contact Yubico, so this function may return true, for some invalid keys.
"""
return ALPHABET_RE.match(otp) and True or False
def yubikey_otp_to_serial(otp: str) ->(int | None):
""" Takes a Yubikey OTP and calculates the serial number of the key.
The serial number is printed on the yubikey, in decimal and as a QR code.
Example:
>>> yubikey_otp_to_serial(
'ccccccdefghdefghdefghdefghdefghdefghdefghklv')
2311522
Adapted from Java:
https://github.com/Yubico/yubikey-salesforce-client/blob/
e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls
If the key cannot be calculated, None is returned. This can happen if
the key is malformed.
"""
if not is_valid_yubikey_format(otp):
return None
token = 'cccc' + otp[:12]
toggle = False
keep = 0
bytesarray = []
for char in token:
n = ALPHABET.index(char)
toggle = not toggle
if toggle:
keep = n
else:
bytesarray.append(keep << 4 | n)
value = 0
mask_value = 31
for i in range(0, 8):
shift = (4 - 1 - i) * 8
value += (bytesarray[i] & 255) << (shift & mask_value)
return value
<|reserved_special_token_0|>
def dict_path(dictionary: dict[str, _T], path: str) ->_T:
""" Gets the value of the given dictionary at the given path. For example:
>>> data = {'foo': {'bar': True}}
>>> dict_path(data, 'foo.bar')
True
"""
if not dictionary:
raise KeyError()
return reduce(operator.getitem, path.split('.'), dictionary)
def safe_move(src: str, dst: str) ->None:
""" Rename a file from ``src`` to ``dst``.
* Moves must be atomic. ``shutil.move()`` is not atomic.
* Moves must work across filesystems. Often temp directories and the
cache directories live on different filesystems. ``os.rename()`` can
throw errors if run across filesystems.
So we try ``os.rename()``, but if we detect a cross-filesystem copy, we
switch to ``shutil.move()`` with some wrappers to make it atomic.
Via https://alexwlchan.net/2019/03/atomic-cross-filesystem-moves-in-python
"""
try:
os.rename(src, dst)
except OSError as err:
if err.errno == errno.EXDEV:
copy_id = uuid4()
tmp_dst = '%s.%s.tmp' % (dst, copy_id)
shutil.copyfile(src, tmp_dst)
os.rename(tmp_dst, dst)
os.unlink(src)
else:
raise
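# Usage sketch with a temporary directory; this only exercises the plain
# os.rename path, since the cross-filesystem branch (errno.EXDEV) cannot
# easily be triggered here.
def _demo_safe_move() -> None:
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, 'a.txt')
        dst = os.path.join(tmp, 'b.txt')
        open(src, 'w').close()
        safe_move(src, dst)
        assert os.path.exists(dst) and not os.path.exists(src)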
<|reserved_special_token_0|>
@overload
def batched(iterable: Iterable[_T], batch_size: int, container_factory:
'type[list]') ->'Iterator[list[_T]]':
...
@overload
def batched(iterable: Iterable[_T], batch_size: int, container_factory:
'Callable[[Iterator[_T]], Collection[_T]]') ->'Iterator[Collection[_T]]':
...
def batched(iterable: Iterable[_T], batch_size: int, container_factory:
'Callable[[Iterator[_T]], Collection[_T]]'=tuple
) ->'Iterator[Collection[_T]]':
""" Splits an iterable into batches of batch_size and puts them
inside a given collection (tuple by default).
The container_factory is necessary in order to consume the iterator
returned by islice. Otherwise this function would never return.
"""
iterator = iter(iterable)
while True:
batch = container_factory(islice(iterator, batch_size))
if len(batch) == 0:
return
yield batch
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def local_lock(namespace: str, key: str) ->'Iterator[None]':
""" Locks the given namespace/key combination on the current system,
automatically freeing it after the with statement has been completed or
once the process is killed.
Usage::
with local_lock('namespace', 'key'):
pass
"""
name = f'{namespace}-{key}'.replace('/', '-')
with open(f'/tmp/{name}', 'w+') as f:
try:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
yield
fcntl.flock(f, fcntl.LOCK_UN)
except BlockingIOError as exception:
raise AlreadyLockedError from exception
def normalize_for_url(text: str) ->str:
""" Takes the given text and makes it fit to be used for an url.
That means replacing spaces and other unwanted characters with '-',
lowercasing everything and turning unicode characters into their closest
ascii equivalent using Unidecode.
See https://pypi.python.org/pypi/Unidecode
"""
text = text.replace('ü', 'ue')
text = text.replace('ä', 'ae')
text = text.replace('ö', 'oe')
clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())
clean = _double_dash.sub('-', clean)
clean = clean.strip('-')
return clean
def increment_name(name: str) ->str:
""" Takes the given name and adds a numbered suffix beginning at 1.
For example::
foo => foo-1
foo-1 => foo-2
"""
match = _number_suffix.search(name)
if match:
number_str = match.group(1)
next_number = int(number_str) + 1
return f'{name[:-len(number_str)]}{next_number}'
else:
return f'{name}-1'
def remove_repeated_spaces(text: str) ->str:
""" Removes repeated spaces in the text ('a b' -> 'a b'). """
return _repeated_spaces.sub(' ', text)
@contextmanager
def profile(filename: str) ->'Iterator[None]':
""" Profiles the wrapped code and stores the result in the profiles folder
with the given filename.
"""
profiler = Profile()
profiler.enable()
yield
profiler.disable()
profiler.create_stats()
profiler.dump_stats('profiles/{}'.format(filename))
@contextmanager
def timing(name: (str | None)=None) ->'Iterator[None]':
""" Runs the wrapped code and prints the time in ms it took to run it.
The name is printed in front of the time, if given.
"""
start = perf_counter()
yield
duration_ms = 1000.0 * (perf_counter() - start)
if name:
print(f'{name}: {duration_ms:.0f} ms')
else:
print(f'{duration_ms:.0f} ms')
<|reserved_special_token_0|>
def module_path(module: 'ModuleType | str', subpath: str) ->str:
""" Returns a subdirectory in the given python module.
:mod:
A python module (actual module or string)
:subpath:
Subpath below that python module. Leading slashes ('/') are ignored.
"""
parent = module_path_root(module)
path = os.path.join(parent, subpath.strip('/'))
assert is_subpath(parent, path)
return path
def touch(file_path: str) ->None:
""" Touches the file on the given path. """
try:
os.utime(file_path, None)
except Exception:
open(file_path, 'a').close()
class Bunch:
""" A simple but handy "collector of a bunch of named stuff" class.
See `<https://code.activestate.com/recipes/52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.
For example::
point = Bunch(x=1, y=2)
assert point.x == 1
assert point.y == 2
point.z = 3
assert point.z == 3
Allows the creation of simple nested bunches, for example::
request = Bunch(**{'app.settings.org.my_setting': True})
assert request.app.settings.org.my_setting is True
"""
def __init__(self, **kwargs: Any):
self.__dict__.update((key, value) for key, value in kwargs.items() if
'.' not in key)
for key, value in kwargs.items():
if '.' in key:
name, _, key = key.partition('.')
setattr(self, name, Bunch(**{key: value}))
if TYPE_CHECKING:
def __getattr__(self, name: str) ->Any:
...
def __setattr__(self, name: str, value: Any) ->None:
...
def __delattr__(self, name: str) ->None:
...
def __eq__(self, other: object) ->bool:
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other: object) ->bool:
return not self.__eq__(other)
<|reserved_special_token_0|>
def hash_dictionary(dictionary: dict[str, Any]) ->str:
""" Computes a sha256 hash for the given dictionary. The dictionary
is expected to only contain values that can be serialized by json.
That includes int, decimal, string, boolean.
import base64
import bleach
import errno
import fcntl
import gzip
import hashlib
import importlib
import inspect
import magic
import mimetypes
import morepath
import operator
import os.path
import re
import shutil
import sqlalchemy
import urllib.request
from markupsafe import Markup
from collections.abc import Iterable
from contextlib import contextmanager
from cProfile import Profile
from functools import reduce
from importlib import import_module
from io import BytesIO, StringIO
from itertools import groupby, islice
from onegov.core import log
from onegov.core.cache import lru_cache
from onegov.core.custom import json
from onegov.core.errors import AlreadyLockedError
from purl import URL
from threading import Thread
from time import perf_counter
from unidecode import unidecode
from uuid import UUID, uuid4
from webob import static
from yubico_client import Yubico
from yubico_client.yubico_exceptions import SignatureVerificationError
from yubico_client.yubico_exceptions import StatusCodeError
from typing import overload, Any, TypeVar, TYPE_CHECKING
if TYPE_CHECKING:
from _typeshed import SupportsRichComparison
from collections.abc import Callable, Collection, Iterator
from fs.base import FS, SubFS
from re import Match
from sqlalchemy import Column
from sqlalchemy.orm import Session
from types import ModuleType
from webob import Response
from .request import CoreRequest
from .types import FileDict, LaxFileDict
_T = TypeVar('_T')
_KT = TypeVar('_KT')
# http://stackoverflow.com/a/13500078
_unwanted_url_chars = re.compile(r'[\.\(\)\\/\s<>\[\]{},:;?!@&=+$#@%|\*"\'`]+')
_double_dash = re.compile(r'[-]+')
_number_suffix = re.compile(r'-([0-9]+)$')
_repeated_spaces = re.compile(r'\s\s+')
_uuid = re.compile(
r'^[a-f0-9]{8}-?[a-f0-9]{4}-?[a-f0-9]{4}-?[a-f0-9]{4}-?[a-f0-9]{12}$')
# only temporary until bleach has a release > 1.4.1 -
_email_regex = re.compile((
r"([a-z0-9!#$%&'*+\/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_`"
r"{|}~-]+)*(@|\sat\s)(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?(\.|"
r"\sdot\s))+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)"
))
# detects multiple successive newlines
_multiple_newlines = re.compile(r'\n{2,}', re.MULTILINE)
# detect starting strings of phone inside a link
_phone_inside_a_tags = r'(\">|href=\"tel:)?'
# regex pattern for swiss phone numbers
_phone_ch_country_code = r"(\+41|0041|0[0-9]{2})"
_phone_ch = re.compile(_phone_ch_country_code + r'([ \r\f\t\d]+)')
# Adds a regex group to capture if a leading a tag is present or if the
# number is part of the href attributes
_phone_ch_html_safe = re.compile(
_phone_inside_a_tags + _phone_ch_country_code + r'([ \r\f\t\d]+)')
# for yubikeys
ALPHABET = 'cbdefghijklnrtuv'
ALPHABET_RE = re.compile(r'^[cbdefghijklnrtuv]{12,44}$')
@contextmanager
def local_lock(namespace: str, key: str) -> 'Iterator[None]':
""" Locks the given namespace/key combination on the current system,
automatically freeing it after the with statement has been completed or
once the process is killed.
Usage::
with lock('namespace', 'key'):
pass
"""
name = f'{namespace}-{key}'.replace('/', '-')
with open(f'/tmp/{name}', 'w+') as f:
try:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
yield
fcntl.flock(f, fcntl.LOCK_UN)
except BlockingIOError as exception:
raise AlreadyLockedError from exception
def normalize_for_url(text: str) -> str:
""" Takes the given text and makes it fit to be used for an url.
That means replacing spaces and other unwanted characters with '-',
lowercasing everything and turning unicode characters into their closest
ascii equivalent using Unidecode.
See https://pypi.python.org/pypi/Unidecode
"""
# German is our main language, so we are extra considerate about it
# (unidecode turns ü into u)
text = text.replace("ü", "ue")
text = text.replace("ä", "ae")
text = text.replace("ö", "oe")
clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())
clean = _double_dash.sub('-', clean)
clean = clean.strip('-')
return clean
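# Illustrative example of the rules above: umlauts are transliterated
# before unidecode runs, and unwanted characters collapse into dashes.
#     >>> normalize_for_url('Büro Zürich 2024!')
#     'buero-zuerich-2024'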
def increment_name(name: str) -> str:
""" Takes the given name and adds a numbered suffix beginning at 1.
For example::
foo => foo-1
foo-1 => foo-2
"""
match = _number_suffix.search(name)
if match:
number_str = match.group(1)
next_number = int(number_str) + 1
return f'{name[:-len(number_str)]}{next_number}'
else:
return f'{name}-1'
def remove_repeated_spaces(text: str) -> str:
""" Removes repeated spaces in the text ('a b' -> 'a b'). """
return _repeated_spaces.sub(' ', text)
@contextmanager
def profile(filename: str) -> 'Iterator[None]':
""" Profiles the wrapped code and stores the result in the profiles folder
with the given filename.
"""
profiler = Profile()
profiler.enable()
yield
profiler.disable()
profiler.create_stats()
profiler.dump_stats('profiles/{}'.format(filename))
@contextmanager
def timing(name: str | None = None) -> 'Iterator[None]':
""" Runs the wrapped code and prints the time in ms it took to run it.
The name is printed in front of the time, if given.
"""
start = perf_counter()
yield
duration_ms = 1000.0 * (perf_counter() - start)
if name:
print(f'{name}: {duration_ms:.0f} ms')
else:
print(f'{duration_ms:.0f} ms')
@lru_cache(maxsize=32)
def module_path_root(module: 'ModuleType | str') -> str:
if isinstance(module, str):
module = importlib.import_module(module)
assert module is not None
return os.path.dirname(inspect.getfile(module))
def module_path(module: 'ModuleType | str', subpath: str) -> str:
""" Returns a subdirectory in the given python module.
:mod:
A python module (actual module or string)
:subpath:
Subpath below that python module. Leading slashes ('/') are ignored.
"""
parent = module_path_root(module)
path = os.path.join(parent, subpath.strip('/'))
# always be paranoid with path manipulation
assert is_subpath(parent, path)
return path
def touch(file_path: str) -> None:
""" Touches the file on the given path. """
try:
os.utime(file_path, None)
except Exception:
open(file_path, 'a').close()
class Bunch:
""" A simple but handy "collector of a bunch of named stuff" class.
See `<https://code.activestate.com/recipes/\
52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.
For example::
point = Bunch(x=1, y=2)
assert point.x == 1
assert point.y == 2
point.z = 3
assert point.z == 3
Allows the creation of simple nested bunches, for example::
request = Bunch(**{'app.settings.org.my_setting': True})
assert request.app.settings.org.my_setting is True
"""
def __init__(self, **kwargs: Any):
self.__dict__.update(
(key, value)
for key, value in kwargs.items()
if '.' not in key
)
for key, value in kwargs.items():
if '.' in key:
name, _, key = key.partition('.')
setattr(self, name, Bunch(**{key: value}))
if TYPE_CHECKING:
# let mypy know that any attribute access could be valid
def __getattr__(self, name: str) -> Any: ...
def __setattr__(self, name: str, value: Any) -> None: ...
def __delattr__(self, name: str) -> None: ...
def __eq__(self, other: object) -> bool:
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def render_file(file_path: str, request: 'CoreRequest') -> 'Response':
""" Takes the given file_path (content) and renders it to the browser.
The file must exist on the local system and be readable by the current
process.
"""
def hash_path(path: str) -> str:
return hashlib.new( # nosec:B324
'sha1',
path.encode('utf-8'),
usedforsecurity=False
).hexdigest()
    # this is a very cacheable result - though it's possible that a file
    # changes its content type, it should usually not, especially since
# we emphasize the use of random filenames
@request.app.cache.cache_on_arguments(to_str=hash_path)
def get_content_type(file_path: str) -> str:
content_type = mimetypes.guess_type(file_path)[0]
if not content_type:
content_type = magic.from_file(file_path, mime=True)
return content_type
return request.get_response(
static.FileApp(file_path, content_type=get_content_type(file_path)))
def hash_dictionary(dictionary: dict[str, Any]) -> str:
""" Computes a sha256 hash for the given dictionary. The dictionary
is expected to only contain values that can be serialized by json.
That includes int, decimal, string, boolean.
Note that this function is not meant to be used for hashing secrets. Do
not include data in this dictionary that is secret!
"""
dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')
return hashlib.new( # nosec:B324
'sha1',
dict_as_string,
usedforsecurity=False
).hexdigest()
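# Because the payload is serialized with sort_keys=True, the digest is
# independent of key order (an illustrative property, not a full spec):
#     >>> hash_dictionary({'a': 1, 'b': 2}) == hash_dictionary({'b': 2, 'a': 1})
#     True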
@overload
def groupbylist(
iterable: Iterable[_T],
key: None = ...
) -> list[tuple[_T, list[_T]]]: ...
@overload
def groupbylist(
iterable: Iterable[_T],
key: 'Callable[[_T], _KT]'
) -> list[tuple[_KT, list[_T]]]: ...
def groupbylist(
iterable: Iterable[_T],
key: 'Callable[[_T], Any] | None' = None
) -> list[tuple[Any, list[_T]]]:
""" Works just like Python's ``itertools.groupby`` function, but instead
of returning generators, it returns lists.
"""
return [(k, list(g)) for k, g in groupby(iterable, key=key)]
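# Like itertools.groupby, only *consecutive* equal keys are grouped:
#     >>> groupbylist([1, 1, 2, 1])
#     [(1, [1, 1]), (2, [2]), (1, [1])]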
def linkify_phone(text: str) -> str:
""" Takes a string and replaces valid phone numbers with html links. If a
phone number is matched, it will be replaced by the result of a callback
function, that does further checks on the regex match. If these checks do
not pass, the matched number will remain unchanged.
"""
def strip_whitespace(number: str) -> str:
return re.sub(r'\s', '', number)
def is_valid_length(number: str) -> bool:
if number.startswith('+00'):
return False
if number.startswith('00'):
return len(number) == 13
elif number.startswith('0'):
return len(number) == 10
elif number.startswith('+'):
return len(number) == 12
return False
def handle_match(match: 'Match[str]') -> str:
inside_html = match.group(1)
number = f'{match.group(2)}{match.group(3)}'
assert not number.endswith('\n')
if inside_html:
return match.group(0)
if is_valid_length(strip_whitespace(number)):
number = remove_repeated_spaces(number).strip()
return f'<a href="tel:{number}">{number}</a> '
return match.group(0)
return _phone_ch_html_safe.sub(handle_match, text)
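# Sketch of the expected output for a Swiss number (the trailing space is
# added deliberately by handle_match above):
#     >>> linkify_phone('Tel. 044 123 45 67')
#     'Tel. <a href="tel:044 123 45 67">044 123 45 67</a> '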
# FIXME: A lot of these methods should be using MarkupSafe
def linkify(text: str, escape: bool = True) -> str:
""" Takes plain text and injects html links for urls and email addresses.
By default the text is html escaped before it is linkified. This accounts
for the fact that we usually use this for text blocks that we mean to
extend with email addresses and urls.
If html is already possible, why linkify it?
Note: We need to clean the html after we've created it (linkify
parses escaped html and turns it into real html). As a consequence it
is possible to have html urls in the text that won't be escaped.
"""
if not text:
return text
long_top_level_domains = ['.agency']
    # bleach.linkify supports only a fairly limited number of TLDs
if any(domain in text for domain in long_top_level_domains):
if '@' in text:
linkified = str(
Markup('<a href="mailto:{text}">{text}</a>').format(
text=text
)
)
else:
linkified = str(
Markup('<a href="{text}">{text}</a>').format(text=text)
)
else:
linkified = linkify_phone(bleach.linkify(text, parse_email=True))
if not escape:
return linkified
return bleach.clean(
linkified,
tags=['a'],
attributes={'a': ['href', 'rel']},
protocols=['http', 'https', 'mailto', 'tel']
)
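# Hedged sketch of typical output (the exact markup depends on the
# installed bleach version):
#     >>> linkify('Mail to [email protected]')
#     'Mail to <a href="mailto:[email protected]">[email protected]</a>'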
def paragraphify(text: str) -> str:
""" Takes a text with newlines groups them into paragraphs according to the
following rules:
If there's a single newline between two lines, a <br> will replace that
newline.
If there are multiple newlines between two lines, each line will become
a paragraph and the extra newlines are discarded.
"""
text = text and text.replace('\r', '').strip('\n')
if not text:
return ''
return ''.join(f'<p>{p}</p>' for p in (
p.replace('\n', '<br>') for p in _multiple_newlines.split(text)
))
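# A single newline becomes <br>, a blank line starts a new paragraph:
#     >>> paragraphify('one\ntwo\n\nthree')
#     '<p>one<br>two</p><p>three</p>'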
def to_html_ul(
value: str,
convert_dashes: bool = True,
with_title: bool = False
) -> str:
""" Linkify and convert to text to one or multiple ul's or paragraphs.
"""
if not value:
return ''
value = value.replace('\r', '').strip('\n')
value = value.replace('\n\n', '\n \n')
if not convert_dashes:
return '<p>{}</p>'.format(
'<br>'.join(linkify(value).splitlines())
)
elements = []
temp: list[str] = []
def ul(inner: str) -> str:
return f'<ul class="bulleted">{inner}</ul>'
def li(inner: str) -> str:
return f'<li>{inner}</li>'
def p(inner: str) -> str:
return f'<p>{inner}</p>'
was_list = False
for i, line in enumerate(value.splitlines()):
if not line:
continue
line = linkify(line)
is_list = line.startswith('-')
new_p_or_ul = True if line == ' ' else False
line = line.lstrip('-').strip()
if with_title:
elements.append(p(f'<span class="title">{line}</span>'))
with_title = False
else:
if new_p_or_ul or (was_list != is_list and i > 0):
elements.append(
ul(''.join(temp)) if was_list else p('<br>'.join(temp))
)
temp = []
was_list = False
if not new_p_or_ul:
temp.append((li(line) if is_list else line))
new_p_or_ul = False
was_list = is_list
if temp:
elements.append(
ul(''.join(temp)) if was_list else p('<br>'.join(temp))
)
return ''.join(elements)
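# Dash-prefixed lines turn into list items (illustrative):
#     >>> to_html_ul('- first\n- second')
#     '<ul class="bulleted"><li>first</li><li>second</li></ul>'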
def ensure_scheme(url: str, default: str = 'http') -> str:
""" Makes sure that the given url has a scheme in front, if none
was provided.
"""
if not url:
return url
# purl (or to be precise urlparse) will parse empty host names ('abc.xyz')
# wrongly, assuming the abc.xyz is a path. by adding a double slash if
# there isn't one already, we can circumvent that problem
if '//' not in url:
url = '//' + url
_url = URL(url)
if _url.scheme():
return url
return _url.scheme(default).as_string()
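# Illustrative behaviour; urls that already carry a scheme are untouched:
#     >>> ensure_scheme('example.org/blog')
#     'http://example.org/blog'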
def is_uuid(value: str | UUID) -> bool:
""" Returns true if the given value is a uuid. The value may be a string
or of type UUID. If it's a string, the uuid is checked with a regex.
"""
if isinstance(value, str):
return _uuid.match(str(value)) and True or False
return isinstance(value, UUID)
def is_non_string_iterable(obj: object) -> bool:
""" Returns true if the given obj is an iterable, but not a string. """
return not (isinstance(obj, str) or isinstance(obj, bytes))\
and isinstance(obj, Iterable)
def relative_url(absolute_url: str | None) -> str:
""" Removes everything in front of the path, including scheme, host,
username, password and port.
"""
url = URL._mutate(
URL(absolute_url),
scheme=None,
username=None,
password=None,
host=None,
port=None
)
return url.as_string()
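# Hedged sketch (assuming purl's default serialization of a host-less URL):
#     >>> relative_url('https://user:[email protected]:8443/events?page=2')
#     '/events?page=2'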
def is_subpath(directory: str, path: str) -> bool:
""" Returns true if the given path is inside the given directory. """
directory = os.path.join(os.path.realpath(directory), '')
path = os.path.realpath(path)
# return true, if the common prefix of both is equal to directory
# e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
return os.path.commonprefix([path, directory]) == directory
@overload
def is_sorted(
iterable: 'Iterable[SupportsRichComparison]',
key: 'Callable[[SupportsRichComparison], SupportsRichComparison]' = ...,
reverse: bool = ...
) -> bool: ...
@overload
def is_sorted(
iterable: 'Iterable[_T]',
key: 'Callable[[_T], SupportsRichComparison]',
reverse: bool = ...
) -> bool: ...
# FIXME: Do we really want to allow any Iterable? This seems like a bad
# idea to me... Iterators will be consumed and the Iterable might
# be infinite. This seems like it should be a Container instead,
# then we also don't need to use tee or list to make a copy
def is_sorted(
iterable: 'Iterable[Any]',
key: 'Callable[[Any], SupportsRichComparison]' = lambda i: i,
reverse: bool = False
) -> bool:
""" Returns True if the iterable is sorted. """
# NOTE: we previously used `tee` here, but since `sorted` consumes
# the entire iterator, this is the exact case where tee is
# slower than just pulling the entire sequence into a list
seq = list(iterable)
for a, b in zip(seq, sorted(seq, key=key, reverse=reverse)):
if a is not b:
return False
return True
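# Illustrative checks; the identity comparison works because sorted() of an
# already sorted sequence returns the very same objects in the same order:
#     >>> is_sorted([1, 2, 2, 3])
#     True
#     >>> is_sorted([3, 1, 2])
#     False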
def morepath_modules(cls: type[morepath.App]) -> 'Iterator[str]':
""" Returns all morepath modules which should be scanned for the given
morepath application class.
We can't reliably know the actual morepath modules that
need to be scanned, which is why we assume that each module has
one namespace (like 'more.transaction' or 'onegov.core').
"""
for base in cls.__mro__:
if not issubclass(base, morepath.App):
continue
if base is morepath.App:
continue
module = '.'.join(base.__module__.split('.')[:2])
if module.startswith('test'):
continue
yield module
def scan_morepath_modules(cls: type[morepath.App]) -> None:
""" Tries to scan all the morepath modules required for the given
application class. This is not guaranteed to stay reliable as there is
no sure way to discover all modules required by the application class.
"""
for module in sorted(morepath_modules(cls)):
morepath.scan(import_module(module))
def get_unique_hstore_keys(
session: 'Session',
column: 'Column[dict[str, Any]]'
) -> set[str]:
""" Returns a set of keys found in an hstore column over all records
of its table.
"""
base = session.query(column.keys()).with_entities( # type:ignore
sqlalchemy.func.skeys(column).label('keys'))
query = sqlalchemy.select(
[sqlalchemy.func.array_agg(sqlalchemy.column('keys'))],
distinct=True
).select_from(base.subquery())
keys = session.execute(query).scalar()
return set(keys) if keys else set()
def makeopendir(fs: 'FS', directory: str) -> 'SubFS[FS]':
""" Creates and opens the given directory in the given PyFilesystem. """
if not fs.isdir(directory):
fs.makedir(directory)
return fs.opendir(directory)
def append_query_param(url: str, key: str, value: str) -> str:
""" Appends a single query parameter to an url. This is faster than
using Purl, if and only if we only add one query param.
Also this function assumes that the value is already url encoded.
"""
template = '?' in url and '{}&{}={}' or '{}?{}={}'
return template.format(url, key, value)
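# The separator is chosen depending on whether a query already exists:
#     >>> append_query_param('https://example.org/x', 'page', '2')
#     'https://example.org/x?page=2'
#     >>> append_query_param('https://example.org/x?page=2', 'lang', 'de')
#     'https://example.org/x?page=2&lang=de'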
class PostThread(Thread):
""" POSTs the given data with the headers to the URL.
Example::
data = {'a': 1, 'b': 2}
data = json.dumps(data).encode('utf-8')
PostThread(
'https://example.com/post',
data,
(
('Content-Type', 'application/json; charset=utf-8'),
('Content-Length', len(data))
)
).start()
    This only works for external URLs! If posting to the server itself is
    needed, use a process instead of a thread!
"""
def __init__(
self,
url: str,
data: bytes,
headers: 'Collection[tuple[str, str]]',
timeout: float = 30
):
Thread.__init__(self)
self.url = url
self.data = data
self.headers = headers
self.timeout = timeout
def run(self) -> None:
try:
# Validate URL protocol before opening it, since it's possible to
# open ftp:// and file:// as well.
if not self.url.lower().startswith('http'):
raise ValueError from None
request = urllib.request.Request(self.url)
for header in self.headers:
request.add_header(header[0], header[1])
urllib.request.urlopen( # nosec B310
request, self.data, self.timeout
)
except Exception as e:
log.error(
'Error while sending a POST request to {}: {}'.format(
self.url, str(e)
)
)
def toggle(collection: set[_T], item: _T | None) -> set[_T]:
""" Returns a new set where the item has been toggled. """
if item is None:
return collection
if item in collection:
return collection - {item}
else:
return collection | {item}
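# Illustrative behaviour; None is a no-op by design:
#     >>> toggle({1, 2}, 2)
#     {1}
#     >>> toggle({1, 2}, 3) == {1, 2, 3}
#     True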
def binary_to_dictionary(
binary: bytes,
filename: str | None = None
) -> 'FileDict':
""" Takes raw binary filedata and stores it in a dictionary together
with metadata information.
    The data is compressed before it is stored in the dictionary. Use
:func:`dictionary_to_binary` to get the original binary data back.
"""
assert isinstance(binary, bytes)
mimetype = magic.from_buffer(binary, mime=True)
# according to https://tools.ietf.org/html/rfc7111, text/csv should be used
if mimetype == 'application/csv':
mimetype = 'text/csv'
gzipdata = BytesIO()
with gzip.GzipFile(fileobj=gzipdata, mode='wb') as f:
f.write(binary)
return {
'data': base64.b64encode(gzipdata.getvalue()).decode('ascii'),
'filename': filename,
'mimetype': mimetype,
'size': len(binary)
}
def dictionary_to_binary(dictionary: 'LaxFileDict') -> bytes:
""" Takes a dictionary created by :func:`binary_to_dictionary` and returns
the original binary data.
"""
data = base64.b64decode(dictionary['data'])
with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:
return f.read()
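# Round-trip sketch with binary_to_dictionary above:
#     >>> dictionary_to_binary(binary_to_dictionary(b'hello')) == b'hello'
#     True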
@overload
def safe_format(
format: str,
dictionary: dict[str, str | int | float],
types: None = ...,
adapt: 'Callable[[str], str] | None' = ...,
raise_on_missing: bool = ...
) -> str: ...
@overload
def safe_format(
format: str,
dictionary: dict[str, _T],
types: set[type[_T]] = ...,
adapt: 'Callable[[str], str] | None' = ...,
raise_on_missing: bool = ...
) -> str: ...
def safe_format(
format: str,
dictionary: dict[str, Any],
types: set[type[Any]] | None = None,
adapt: 'Callable[[str], str] | None' = None,
raise_on_missing: bool = False
) -> str:
""" Takes a user-supplied string with format blocks and returns a string
where those blocks are replaced by values in a dictionary.
For example::
>>> safe_format('[user] has logged in', {'user': 'admin'})
'admin has logged in'
:param format:
The format to use. Square brackets denote dictionary keys. To
        literally print square brackets, mask them by doubling ('[[' -> '[')
:param dictionary:
The dictionary holding the variables to use. If the key is not found
in the dictionary, the bracket is replaced with an empty string.
:param types:
A set of types supported by the dictionary. Limiting this to safe
        types like builtins (str, int, float) ensures that no values are
accidentally leaked through faulty __str__ representations.
Note that inheritance is ignored. Supported types need to be
whitelisted explicitly.
:param adapt:
An optional callable that receives the key before it is used. Returns
the same key or an altered version.
:param raise_on_missing:
True if missing keys should result in a runtime error (defaults to
False).
    This is strictly meant for formats provided by users. Python's own
    string formatting is clearly superior to this; it is, however, not
    safe to expose to user-supplied format strings!
"""
types = types or {int, str, float}
output = StringIO()
buffer = StringIO()
opened = 0
for ix, char in enumerate(format):
if char == '[':
opened += 1
if char == ']':
opened -= 1
if opened == 1 and char != '[' and char != ']':
print(char, file=buffer, end='')
continue
if opened == 2 or opened == -2:
if buffer.tell():
raise RuntimeError("Unexpected bracket inside bracket found")
print(char, file=output, end='')
opened = 0
continue
if buffer.tell():
k = adapt(buffer.getvalue()) if adapt else buffer.getvalue()
if raise_on_missing and k not in dictionary:
raise RuntimeError("Key '{}' is unknown".format(k))
v = dictionary.get(k, '')
t = type(v)
if t not in types:
raise RuntimeError("Invalid type for '{}': {}".format(k, t))
print(v, file=output, end='')
buffer = StringIO()
if char != '[' and char != ']':
print(char, file=output, end='')
if opened != 0:
raise RuntimeError("Uneven number of brackets in '{}'".format(format))
return output.getvalue()
def safe_format_keys(
format: str,
adapt: 'Callable[[str], str] | None' = None
) -> list[str]:
""" Takes a :func:`safe_format` string and returns the found keys. """
keys = []
def adapt_and_record(key: str) -> str:
key = adapt(key) if adapt else key
keys.append(key)
return key
safe_format(format, {}, adapt=adapt_and_record)
return keys
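# Illustrative extraction of the bracket keys:
#     >>> safe_format_keys('[user] deleted [count] files')
#     ['user', 'count']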
def is_valid_yubikey(
client_id: str,
secret_key: str,
expected_yubikey_id: str,
yubikey: str
) -> bool:
""" Asks the yubico validation servers if the given yubikey OTP is valid.
:client_id:
The yubico API client id.
:secret_key:
The yubico API secret key.
:expected_yubikey_id:
The expected yubikey id. The yubikey id is defined as the first twelve
characters of any yubikey value. Each user should have a yubikey
        associated with its account. If the yubikey value comes from a
different key, the key is invalid.
:yubikey:
The actual yubikey value that should be verified.
:return: True if yubico confirmed the validity of the key.
"""
assert client_id and secret_key and expected_yubikey_id and yubikey
assert len(expected_yubikey_id) == 12
# if the yubikey doesn't start with the expected yubikey id we do not
# need to make a roundtrip to the validation server
if not yubikey.startswith(expected_yubikey_id):
# FIXME: Are we leaking information with this early out?
return False
try:
return Yubico(client_id, secret_key).verify(yubikey)
except StatusCodeError as e:
if e.status_code != 'REPLAYED_OTP':
raise e
return False
except SignatureVerificationError:
return False
def is_valid_yubikey_format(otp: str) -> bool:
""" Returns True if the given OTP has the correct format. Does not actually
    contact Yubico, so this function may return true for some invalid keys.
"""
return ALPHABET_RE.match(otp) and True or False
def yubikey_otp_to_serial(otp: str) -> int | None:
""" Takes a Yubikey OTP and calculates the serial number of the key.
The serial key is printed on the yubikey, in decimal and as a QR code.
Example:
>>> yubikey_otp_to_serial(
'ccccccdefghdefghdefghdefghdefghdefghdefghklv')
2311522
Adapted from Java:
https://github.com/Yubico/yubikey-salesforce-client/blob/
e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls
If the key cannot be calculated, None is returned. This can happen if
    the key is malformed.
"""
if not is_valid_yubikey_format(otp):
return None
token = 'cccc' + otp[:12]
toggle = False
keep = 0
bytesarray = []
for char in token:
n = ALPHABET.index(char)
toggle = not toggle
if toggle:
keep = n
else:
bytesarray.append((keep << 4) | n)
value = 0
# in Java, shifts on integers are masked with 0x1f using AND
# https://docs.oracle.com/javase/specs/jls/se8/html/jls-15.html#jls-15.19
mask_value = 0x1f
for i in range(0, 8):
shift = (4 - 1 - i) * 8
value += (bytesarray[i] & 255) << (shift & mask_value)
return value
def yubikey_public_id(otp: str) -> str:
""" Returns the yubikey identity given a token. """
return otp[:12]
def dict_path(dictionary: dict[str, _T], path: str) -> _T:
""" Gets the value of the given dictionary at the given path. For example:
>>> data = {'foo': {'bar': True}}
>>> dict_path(data, 'foo.bar')
True
"""
if not dictionary:
raise KeyError()
return reduce(operator.getitem, path.split('.'), dictionary) # type:ignore
def safe_move(src: str, dst: str) -> None:
""" Rename a file from ``src`` to ``dst``.
* Moves must be atomic. ``shutil.move()`` is not atomic.
* Moves must work across filesystems. Often temp directories and the
cache directories live on different filesystems. ``os.rename()`` can
throw errors if run across filesystems.
So we try ``os.rename()``, but if we detect a cross-filesystem copy, we
switch to ``shutil.move()`` with some wrappers to make it atomic.
Via https://alexwlchan.net/2019/03/atomic-cross-filesystem-moves-in-python
"""
try:
os.rename(src, dst)
except OSError as err:
if err.errno == errno.EXDEV:
# Generate a unique ID, and copy `<src>` to the target directory
# with a temporary name `<dst>.<ID>.tmp`. Because we're copying
# across a filesystem boundary, this initial copy may not be
# atomic. We intersperse a random UUID so if different processes
# are copying into `<dst>`, they don't overlap in their tmp copies.
copy_id = uuid4()
tmp_dst = "%s.%s.tmp" % (dst, copy_id)
shutil.copyfile(src, tmp_dst)
# Then do an atomic rename onto the new name, and clean up the
# source image.
os.rename(tmp_dst, dst)
os.unlink(src)
else:
raise
@overload
def batched(
iterable: Iterable[_T],
batch_size: int,
container_factory: 'type[tuple]' = ... # type:ignore[type-arg]
) -> 'Iterator[tuple[_T, ...]]': ...
@overload
def batched(
iterable: Iterable[_T],
batch_size: int,
container_factory: 'type[list]' # type:ignore[type-arg]
) -> 'Iterator[list[_T]]': ...
# NOTE: If there were higher order TypeVars, we could properly infer
# the type of the Container, for now we just add overloads for
# two of the most common container_factories
@overload
def batched(
iterable: Iterable[_T],
batch_size: int,
container_factory: 'Callable[[Iterator[_T]], Collection[_T]]'
) -> 'Iterator[Collection[_T]]': ...
def batched(
iterable: Iterable[_T],
batch_size: int,
container_factory: 'Callable[[Iterator[_T]], Collection[_T]]' = tuple
) -> 'Iterator[Collection[_T]]':
""" Splits an iterable into batches of batch_size and puts them
inside a given collection (tuple by default).
The container_factory is necessary in order to consume the iterator
returned by islice. Otherwise this function would never return.
"""
iterator = iter(iterable)
while True:
batch = container_factory(islice(iterator, batch_size))
if len(batch) == 0:
return
yield batch
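# The last batch may be shorter than batch_size (illustrative):
#     >>> list(batched(range(5), 2))
#     [(0, 1), (2, 3), (4,)]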
|
flexible
|
{
"blob_id": "084c9ad83091f6f96d19c0f0c28520ccda93bbaf",
"index": 7778,
"step-1": "<mask token>\n\n\ndef normalize_for_url(text: str) ->str:\n \"\"\" Takes the given text and makes it fit to be used for an url.\n\n That means replacing spaces and other unwanted characters with '-',\n lowercasing everything and turning unicode characters into their closest\n ascii equivalent using Unidecode.\n\n See https://pypi.python.org/pypi/Unidecode\n\n \"\"\"\n text = text.replace('ü', 'ue')\n text = text.replace('ä', 'ae')\n text = text.replace('ö', 'oe')\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n return clean\n\n\ndef increment_name(name: str) ->str:\n \"\"\" Takes the given name and adds a numbered suffix beginning at 1.\n\n For example::\n\n foo => foo-1\n foo-1 => foo-2\n\n \"\"\"\n match = _number_suffix.search(name)\n if match:\n number_str = match.group(1)\n next_number = int(number_str) + 1\n return f'{name[:-len(number_str)]}{next_number}'\n else:\n return f'{name}-1'\n\n\ndef remove_repeated_spaces(text: str) ->str:\n \"\"\" Removes repeated spaces in the text ('a b' -> 'a b'). \"\"\"\n return _repeated_spaces.sub(' ', text)\n\n\n@contextmanager\ndef profile(filename: str) ->'Iterator[None]':\n \"\"\" Profiles the wrapped code and stores the result in the profiles folder\n with the given filename.\n\n \"\"\"\n profiler = Profile()\n profiler.enable()\n yield\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))\n\n\n@contextmanager\ndef timing(name: (str | None)=None) ->'Iterator[None]':\n \"\"\" Runs the wrapped code and prints the time in ms it took to run it.\n The name is printed in front of the time, if given.\n\n \"\"\"\n start = perf_counter()\n yield\n duration_ms = 1000.0 * (perf_counter() - start)\n if name:\n print(f'{name}: {duration_ms:.0f} ms')\n else:\n print(f'{duration_ms:.0f} ms')\n\n\n<mask token>\n\n\nclass Bunch:\n \"\"\" A simple but handy \"collector of a bunch of named stuff\" class.\n\n See `<https://code.activestate.com/recipes/ 52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.\n\n For example::\n\n point = Bunch(x=1, y=2)\n assert point.x == 1\n assert point.y == 2\n\n point.z = 3\n assert point.z == 3\n\n Allows the creation of simple nested bunches, for example::\n\n request = Bunch(**{'app.settings.org.my_setting': True})\n assert request.app.settings.org.my_setting is True\n\n \"\"\"\n\n def __init__(self, **kwargs: Any):\n self.__dict__.update((key, value) for key, value in kwargs.items() if\n '.' not in key)\n for key, value in kwargs.items():\n if '.' in key:\n name, _, key = key.partition('.')\n setattr(self, name, Bunch(**{key: value}))\n if TYPE_CHECKING:\n\n def __getattr__(self, name: str) ->Any:\n ...\n\n def __setattr__(self, name: str, value: Any) ->None:\n ...\n\n def __delattr__(self, name: str) ->None:\n ...\n\n def __eq__(self, other: object) ->bool:\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n\n def __ne__(self, other: object) ->bool:\n return not self.__eq__(other)\n\n\n<mask token>\n\n\ndef hash_dictionary(dictionary: dict[str, Any]) ->str:\n \"\"\" Computes a sha256 hash for the given dictionary. The dictionary\n is expected to only contain values that can be serialized by json.\n\n That includes int, decimal, string, boolean.\n\n Note that this function is not meant to be used for hashing secrets. 
Do\n not include data in this dictionary that is secret!\n\n \"\"\"\n dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')\n return hashlib.new('sha1', dict_as_string, usedforsecurity=False\n ).hexdigest()\n\n\n<mask token>\n\n\ndef linkify_phone(text: str) ->str:\n \"\"\" Takes a string and replaces valid phone numbers with html links. If a\n phone number is matched, it will be replaced by the result of a callback\n function, that does further checks on the regex match. If these checks do\n not pass, the matched number will remain unchanged.\n\n \"\"\"\n\n def strip_whitespace(number: str) ->str:\n return re.sub('\\\\s', '', number)\n\n def is_valid_length(number: str) ->bool:\n if number.startswith('+00'):\n return False\n if number.startswith('00'):\n return len(number) == 13\n elif number.startswith('0'):\n return len(number) == 10\n elif number.startswith('+'):\n return len(number) == 12\n return False\n\n def handle_match(match: 'Match[str]') ->str:\n inside_html = match.group(1)\n number = f'{match.group(2)}{match.group(3)}'\n assert not number.endswith('\\n')\n if inside_html:\n return match.group(0)\n if is_valid_length(strip_whitespace(number)):\n number = remove_repeated_spaces(number).strip()\n return f'<a href=\"tel:{number}\">{number}</a> '\n return match.group(0)\n return _phone_ch_html_safe.sub(handle_match, text)\n\n\ndef linkify(text: str, escape: bool=True) ->str:\n \"\"\" Takes plain text and injects html links for urls and email addresses.\n\n By default the text is html escaped before it is linkified. This accounts\n for the fact that we usually use this for text blocks that we mean to\n extend with email addresses and urls.\n\n If html is already possible, why linkify it?\n\n Note: We need to clean the html after we've created it (linkify\n parses escaped html and turns it into real html). 
As a consequence it\n is possible to have html urls in the text that won't be escaped.\n\n \"\"\"\n if not text:\n return text\n long_top_level_domains = ['.agency']\n if any(domain in text for domain in long_top_level_domains):\n if '@' in text:\n linkified = str(Markup('<a href=\"mailto:{text}\">{text}</a>').\n format(text=text))\n else:\n linkified = str(Markup('<a href=\"{text}\">{text}</a>').format(\n text=text))\n else:\n linkified = linkify_phone(bleach.linkify(text, parse_email=True))\n if not escape:\n return linkified\n return bleach.clean(linkified, tags=['a'], attributes={'a': ['href',\n 'rel']}, protocols=['http', 'https', 'mailto', 'tel'])\n\n\ndef paragraphify(text: str) ->str:\n \"\"\" Takes a text with newlines groups them into paragraphs according to the\n following rules:\n\n If there's a single newline between two lines, a <br> will replace that\n newline.\n\n If there are multiple newlines between two lines, each line will become\n a paragraph and the extra newlines are discarded.\n\n \"\"\"\n text = text and text.replace('\\r', '').strip('\\n')\n if not text:\n return ''\n return ''.join(f'<p>{p}</p>' for p in (p.replace('\\n', '<br>') for p in\n _multiple_newlines.split(text)))\n\n\ndef to_html_ul(value: str, convert_dashes: bool=True, with_title: bool=False\n ) ->str:\n \"\"\" Linkify and convert to text to one or multiple ul's or paragraphs.\n \"\"\"\n if not value:\n return ''\n value = value.replace('\\r', '').strip('\\n')\n value = value.replace('\\n\\n', '\\n \\n')\n if not convert_dashes:\n return '<p>{}</p>'.format('<br>'.join(linkify(value).splitlines()))\n elements = []\n temp: list[str] = []\n\n def ul(inner: str) ->str:\n return f'<ul class=\"bulleted\">{inner}</ul>'\n\n def li(inner: str) ->str:\n return f'<li>{inner}</li>'\n\n def p(inner: str) ->str:\n return f'<p>{inner}</p>'\n was_list = False\n for i, line in enumerate(value.splitlines()):\n if not line:\n continue\n line = linkify(line)\n is_list = line.startswith('-')\n new_p_or_ul = True if line == ' ' else False\n line = line.lstrip('-').strip()\n if with_title:\n elements.append(p(f'<span class=\"title\">{line}</span>'))\n with_title = False\n else:\n if new_p_or_ul or was_list != is_list and i > 0:\n elements.append(ul(''.join(temp)) if was_list else p('<br>'\n .join(temp)))\n temp = []\n was_list = False\n if not new_p_or_ul:\n temp.append(li(line) if is_list else line)\n new_p_or_ul = False\n was_list = is_list\n if temp:\n elements.append(ul(''.join(temp)) if was_list else p('<br>'.join(temp))\n )\n return ''.join(elements)\n\n\ndef ensure_scheme(url: str, default: str='http') ->str:\n \"\"\" Makes sure that the given url has a scheme in front, if none\n was provided.\n\n \"\"\"\n if not url:\n return url\n if '//' not in url:\n url = '//' + url\n _url = URL(url)\n if _url.scheme():\n return url\n return _url.scheme(default).as_string()\n\n\ndef is_uuid(value: (str | UUID)) ->bool:\n \"\"\" Returns true if the given value is a uuid. The value may be a string\n or of type UUID. If it's a string, the uuid is checked with a regex.\n \"\"\"\n if isinstance(value, str):\n return _uuid.match(str(value)) and True or False\n return isinstance(value, UUID)\n\n\n<mask token>\n\n\ndef is_subpath(directory: str, path: str) ->bool:\n \"\"\" Returns true if the given path is inside the given directory. 
\"\"\"\n directory = os.path.join(os.path.realpath(directory), '')\n path = os.path.realpath(path)\n return os.path.commonprefix([path, directory]) == directory\n\n\n@overload\ndef is_sorted(iterable: 'Iterable[SupportsRichComparison]', key:\n 'Callable[[SupportsRichComparison], SupportsRichComparison]'=...,\n reverse: bool=...) ->bool:\n ...\n\n\n@overload\ndef is_sorted(iterable: 'Iterable[_T]', key:\n 'Callable[[_T], SupportsRichComparison]', reverse: bool=...) ->bool:\n ...\n\n\n<mask token>\n\n\ndef morepath_modules(cls: type[morepath.App]) ->'Iterator[str]':\n \"\"\" Returns all morepath modules which should be scanned for the given\n morepath application class.\n\n We can't reliably know the actual morepath modules that\n need to be scanned, which is why we assume that each module has\n one namespace (like 'more.transaction' or 'onegov.core').\n\n \"\"\"\n for base in cls.__mro__:\n if not issubclass(base, morepath.App):\n continue\n if base is morepath.App:\n continue\n module = '.'.join(base.__module__.split('.')[:2])\n if module.startswith('test'):\n continue\n yield module\n\n\n<mask token>\n\n\ndef get_unique_hstore_keys(session: 'Session', column: 'Column[dict[str, Any]]'\n ) ->set[str]:\n \"\"\" Returns a set of keys found in an hstore column over all records\n of its table.\n\n \"\"\"\n base = session.query(column.keys()).with_entities(sqlalchemy.func.skeys\n (column).label('keys'))\n query = sqlalchemy.select([sqlalchemy.func.array_agg(sqlalchemy.column(\n 'keys'))], distinct=True).select_from(base.subquery())\n keys = session.execute(query).scalar()\n return set(keys) if keys else set()\n\n\ndef makeopendir(fs: 'FS', directory: str) ->'SubFS[FS]':\n \"\"\" Creates and opens the given directory in the given PyFilesystem. \"\"\"\n if not fs.isdir(directory):\n fs.makedir(directory)\n return fs.opendir(directory)\n\n\ndef append_query_param(url: str, key: str, value: str) ->str:\n \"\"\" Appends a single query parameter to an url. This is faster than\n using Purl, if and only if we only add one query param.\n\n Also this function assumes that the value is already url encoded.\n\n \"\"\"\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)\n\n\nclass PostThread(Thread):\n \"\"\" POSTs the given data with the headers to the URL.\n\n Example::\n\n data = {'a': 1, 'b': 2}\n data = json.dumps(data).encode('utf-8')\n PostThread(\n 'https://example.com/post',\n data,\n (\n ('Content-Type', 'application/json; charset=utf-8'),\n ('Content-Length', len(data))\n )\n ).start()\n\n This only works for external URLs! 
If posting to server itself is\n needed, use a process instead of the thread!\n\n \"\"\"\n\n def __init__(self, url: str, data: bytes, headers:\n 'Collection[tuple[str, str]]', timeout: float=30):\n Thread.__init__(self)\n self.url = url\n self.data = data\n self.headers = headers\n self.timeout = timeout\n\n def run(self) ->None:\n try:\n if not self.url.lower().startswith('http'):\n raise ValueError from None\n request = urllib.request.Request(self.url)\n for header in self.headers:\n request.add_header(header[0], header[1])\n urllib.request.urlopen(request, self.data, self.timeout)\n except Exception as e:\n log.error('Error while sending a POST request to {}: {}'.format\n (self.url, str(e)))\n\n\n<mask token>\n\n\ndef dictionary_to_binary(dictionary: 'LaxFileDict') ->bytes:\n \"\"\" Takes a dictionary created by :func:`binary_to_dictionary` and returns\n the original binary data.\n\n \"\"\"\n data = base64.b64decode(dictionary['data'])\n with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:\n return f.read()\n\n\n@overload\ndef safe_format(format: str, dictionary: dict[str, str | int | float],\n types: None=..., adapt: 'Callable[[str], str] | None'=...,\n raise_on_missing: bool=...) ->str:\n ...\n\n\n<mask token>\n\n\ndef is_valid_yubikey(client_id: str, secret_key: str, expected_yubikey_id:\n str, yubikey: str) ->bool:\n \"\"\" Asks the yubico validation servers if the given yubikey OTP is valid.\n\n :client_id:\n The yubico API client id.\n\n :secret_key:\n The yubico API secret key.\n\n :expected_yubikey_id:\n The expected yubikey id. The yubikey id is defined as the first twelve\n characters of any yubikey value. Each user should have a yubikey\n associated with it's account. If the yubikey value comes from a\n different key, the key is invalid.\n\n :yubikey:\n The actual yubikey value that should be verified.\n\n :return: True if yubico confirmed the validity of the key.\n\n \"\"\"\n assert client_id and secret_key and expected_yubikey_id and yubikey\n assert len(expected_yubikey_id) == 12\n if not yubikey.startswith(expected_yubikey_id):\n return False\n try:\n return Yubico(client_id, secret_key).verify(yubikey)\n except StatusCodeError as e:\n if e.status_code != 'REPLAYED_OTP':\n raise e\n return False\n except SignatureVerificationError:\n return False\n\n\n<mask token>\n\n\ndef yubikey_otp_to_serial(otp: str) ->(int | None):\n \"\"\" Takes a Yubikey OTP and calculates the serial number of the key.\n\n The serial key is printed on the yubikey, in decimal and as a QR code.\n\n Example:\n\n >>> yubikey_otp_to_serial(\n 'ccccccdefghdefghdefghdefghdefghdefghdefghklv')\n 2311522\n\n Adapted from Java:\n\n https://github.com/Yubico/yubikey-salesforce-client/blob/\n e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls\n\n If the key cannot be calculated, None is returned. This can happen if\n they key is malformed.\n\n \"\"\"\n if not is_valid_yubikey_format(otp):\n return None\n token = 'cccc' + otp[:12]\n toggle = False\n keep = 0\n bytesarray = []\n for char in token:\n n = ALPHABET.index(char)\n toggle = not toggle\n if toggle:\n keep = n\n else:\n bytesarray.append(keep << 4 | n)\n value = 0\n mask_value = 31\n for i in range(0, 8):\n shift = (4 - 1 - i) * 8\n value += (bytesarray[i] & 255) << (shift & mask_value)\n return value\n\n\n<mask token>\n\n\ndef dict_path(dictionary: dict[str, _T], path: str) ->_T:\n \"\"\" Gets the value of the given dictionary at the given path. 
For example:\n\n >>> data = {'foo': {'bar': True}}\n >>> dict_path(data, 'foo.bar')\n True\n\n \"\"\"\n if not dictionary:\n raise KeyError()\n return reduce(operator.getitem, path.split('.'), dictionary)\n\n\n<mask token>\n\n\n@overload\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'type[list]') ->'Iterator[list[_T]]':\n ...\n\n\n@overload\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'Callable[[Iterator[_T]], Collection[_T]]') ->'Iterator[Collection[_T]]':\n ...\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@contextmanager\ndef local_lock(namespace: str, key: str) ->'Iterator[None]':\n \"\"\" Locks the given namespace/key combination on the current system,\n automatically freeing it after the with statement has been completed or\n once the process is killed.\n\n Usage::\n\n with lock('namespace', 'key'):\n pass\n\n \"\"\"\n name = f'{namespace}-{key}'.replace('/', '-')\n with open(f'/tmp/{name}', 'w+') as f:\n try:\n fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n yield\n fcntl.flock(f, fcntl.LOCK_UN)\n except BlockingIOError as exception:\n raise AlreadyLockedError from exception\n\n\ndef normalize_for_url(text: str) ->str:\n \"\"\" Takes the given text and makes it fit to be used for an url.\n\n That means replacing spaces and other unwanted characters with '-',\n lowercasing everything and turning unicode characters into their closest\n ascii equivalent using Unidecode.\n\n See https://pypi.python.org/pypi/Unidecode\n\n \"\"\"\n text = text.replace('ü', 'ue')\n text = text.replace('ä', 'ae')\n text = text.replace('ö', 'oe')\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n return clean\n\n\ndef increment_name(name: str) ->str:\n \"\"\" Takes the given name and adds a numbered suffix beginning at 1.\n\n For example::\n\n foo => foo-1\n foo-1 => foo-2\n\n \"\"\"\n match = _number_suffix.search(name)\n if match:\n number_str = match.group(1)\n next_number = int(number_str) + 1\n return f'{name[:-len(number_str)]}{next_number}'\n else:\n return f'{name}-1'\n\n\ndef remove_repeated_spaces(text: str) ->str:\n \"\"\" Removes repeated spaces in the text ('a b' -> 'a b'). \"\"\"\n return _repeated_spaces.sub(' ', text)\n\n\n@contextmanager\ndef profile(filename: str) ->'Iterator[None]':\n \"\"\" Profiles the wrapped code and stores the result in the profiles folder\n with the given filename.\n\n \"\"\"\n profiler = Profile()\n profiler.enable()\n yield\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))\n\n\n@contextmanager\ndef timing(name: (str | None)=None) ->'Iterator[None]':\n \"\"\" Runs the wrapped code and prints the time in ms it took to run it.\n The name is printed in front of the time, if given.\n\n \"\"\"\n start = perf_counter()\n yield\n duration_ms = 1000.0 * (perf_counter() - start)\n if name:\n print(f'{name}: {duration_ms:.0f} ms')\n else:\n print(f'{duration_ms:.0f} ms')\n\n\n<mask token>\n\n\nclass Bunch:\n \"\"\" A simple but handy \"collector of a bunch of named stuff\" class.\n\n See `<https://code.activestate.com/recipes/ 52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.\n\n For example::\n\n point = Bunch(x=1, y=2)\n assert point.x == 1\n assert point.y == 2\n\n point.z = 3\n assert point.z == 3\n\n Allows the creation of simple nested bunches, for example::\n\n request = Bunch(**{'app.settings.org.my_setting': True})\n assert request.app.settings.org.my_setting is True\n\n \"\"\"\n\n def __init__(self, **kwargs: Any):\n self.__dict__.update((key, value) for key, value in kwargs.items() if\n '.' not in key)\n for key, value in kwargs.items():\n if '.' 
in key:\n name, _, key = key.partition('.')\n setattr(self, name, Bunch(**{key: value}))\n if TYPE_CHECKING:\n\n def __getattr__(self, name: str) ->Any:\n ...\n\n def __setattr__(self, name: str, value: Any) ->None:\n ...\n\n def __delattr__(self, name: str) ->None:\n ...\n\n def __eq__(self, other: object) ->bool:\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n\n def __ne__(self, other: object) ->bool:\n return not self.__eq__(other)\n\n\n<mask token>\n\n\ndef hash_dictionary(dictionary: dict[str, Any]) ->str:\n \"\"\" Computes a sha256 hash for the given dictionary. The dictionary\n is expected to only contain values that can be serialized by json.\n\n That includes int, decimal, string, boolean.\n\n Note that this function is not meant to be used for hashing secrets. Do\n not include data in this dictionary that is secret!\n\n \"\"\"\n dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')\n return hashlib.new('sha1', dict_as_string, usedforsecurity=False\n ).hexdigest()\n\n\n<mask token>\n\n\ndef linkify_phone(text: str) ->str:\n \"\"\" Takes a string and replaces valid phone numbers with html links. If a\n phone number is matched, it will be replaced by the result of a callback\n function, that does further checks on the regex match. If these checks do\n not pass, the matched number will remain unchanged.\n\n \"\"\"\n\n def strip_whitespace(number: str) ->str:\n return re.sub('\\\\s', '', number)\n\n def is_valid_length(number: str) ->bool:\n if number.startswith('+00'):\n return False\n if number.startswith('00'):\n return len(number) == 13\n elif number.startswith('0'):\n return len(number) == 10\n elif number.startswith('+'):\n return len(number) == 12\n return False\n\n def handle_match(match: 'Match[str]') ->str:\n inside_html = match.group(1)\n number = f'{match.group(2)}{match.group(3)}'\n assert not number.endswith('\\n')\n if inside_html:\n return match.group(0)\n if is_valid_length(strip_whitespace(number)):\n number = remove_repeated_spaces(number).strip()\n return f'<a href=\"tel:{number}\">{number}</a> '\n return match.group(0)\n return _phone_ch_html_safe.sub(handle_match, text)\n\n\ndef linkify(text: str, escape: bool=True) ->str:\n \"\"\" Takes plain text and injects html links for urls and email addresses.\n\n By default the text is html escaped before it is linkified. This accounts\n for the fact that we usually use this for text blocks that we mean to\n extend with email addresses and urls.\n\n If html is already possible, why linkify it?\n\n Note: We need to clean the html after we've created it (linkify\n parses escaped html and turns it into real html). 
As a consequence it\n is possible to have html urls in the text that won't be escaped.\n\n \"\"\"\n if not text:\n return text\n long_top_level_domains = ['.agency']\n if any(domain in text for domain in long_top_level_domains):\n if '@' in text:\n linkified = str(Markup('<a href=\"mailto:{text}\">{text}</a>').\n format(text=text))\n else:\n linkified = str(Markup('<a href=\"{text}\">{text}</a>').format(\n text=text))\n else:\n linkified = linkify_phone(bleach.linkify(text, parse_email=True))\n if not escape:\n return linkified\n return bleach.clean(linkified, tags=['a'], attributes={'a': ['href',\n 'rel']}, protocols=['http', 'https', 'mailto', 'tel'])\n\n\ndef paragraphify(text: str) ->str:\n \"\"\" Takes a text with newlines groups them into paragraphs according to the\n following rules:\n\n If there's a single newline between two lines, a <br> will replace that\n newline.\n\n If there are multiple newlines between two lines, each line will become\n a paragraph and the extra newlines are discarded.\n\n \"\"\"\n text = text and text.replace('\\r', '').strip('\\n')\n if not text:\n return ''\n return ''.join(f'<p>{p}</p>' for p in (p.replace('\\n', '<br>') for p in\n _multiple_newlines.split(text)))\n\n\ndef to_html_ul(value: str, convert_dashes: bool=True, with_title: bool=False\n ) ->str:\n \"\"\" Linkify and convert to text to one or multiple ul's or paragraphs.\n \"\"\"\n if not value:\n return ''\n value = value.replace('\\r', '').strip('\\n')\n value = value.replace('\\n\\n', '\\n \\n')\n if not convert_dashes:\n return '<p>{}</p>'.format('<br>'.join(linkify(value).splitlines()))\n elements = []\n temp: list[str] = []\n\n def ul(inner: str) ->str:\n return f'<ul class=\"bulleted\">{inner}</ul>'\n\n def li(inner: str) ->str:\n return f'<li>{inner}</li>'\n\n def p(inner: str) ->str:\n return f'<p>{inner}</p>'\n was_list = False\n for i, line in enumerate(value.splitlines()):\n if not line:\n continue\n line = linkify(line)\n is_list = line.startswith('-')\n new_p_or_ul = True if line == ' ' else False\n line = line.lstrip('-').strip()\n if with_title:\n elements.append(p(f'<span class=\"title\">{line}</span>'))\n with_title = False\n else:\n if new_p_or_ul or was_list != is_list and i > 0:\n elements.append(ul(''.join(temp)) if was_list else p('<br>'\n .join(temp)))\n temp = []\n was_list = False\n if not new_p_or_ul:\n temp.append(li(line) if is_list else line)\n new_p_or_ul = False\n was_list = is_list\n if temp:\n elements.append(ul(''.join(temp)) if was_list else p('<br>'.join(temp))\n )\n return ''.join(elements)\n\n\ndef ensure_scheme(url: str, default: str='http') ->str:\n \"\"\" Makes sure that the given url has a scheme in front, if none\n was provided.\n\n \"\"\"\n if not url:\n return url\n if '//' not in url:\n url = '//' + url\n _url = URL(url)\n if _url.scheme():\n return url\n return _url.scheme(default).as_string()\n\n\ndef is_uuid(value: (str | UUID)) ->bool:\n \"\"\" Returns true if the given value is a uuid. The value may be a string\n or of type UUID. 
If it's a string, the uuid is checked with a regex.\n \"\"\"\n if isinstance(value, str):\n return _uuid.match(str(value)) and True or False\n return isinstance(value, UUID)\n\n\n<mask token>\n\n\ndef relative_url(absolute_url: (str | None)) ->str:\n \"\"\" Removes everything in front of the path, including scheme, host,\n username, password and port.\n\n \"\"\"\n url = URL._mutate(URL(absolute_url), scheme=None, username=None,\n password=None, host=None, port=None)\n return url.as_string()\n\n\ndef is_subpath(directory: str, path: str) ->bool:\n \"\"\" Returns true if the given path is inside the given directory. \"\"\"\n directory = os.path.join(os.path.realpath(directory), '')\n path = os.path.realpath(path)\n return os.path.commonprefix([path, directory]) == directory\n\n\n@overload\ndef is_sorted(iterable: 'Iterable[SupportsRichComparison]', key:\n 'Callable[[SupportsRichComparison], SupportsRichComparison]'=...,\n reverse: bool=...) ->bool:\n ...\n\n\n@overload\ndef is_sorted(iterable: 'Iterable[_T]', key:\n 'Callable[[_T], SupportsRichComparison]', reverse: bool=...) ->bool:\n ...\n\n\n<mask token>\n\n\ndef morepath_modules(cls: type[morepath.App]) ->'Iterator[str]':\n \"\"\" Returns all morepath modules which should be scanned for the given\n morepath application class.\n\n We can't reliably know the actual morepath modules that\n need to be scanned, which is why we assume that each module has\n one namespace (like 'more.transaction' or 'onegov.core').\n\n \"\"\"\n for base in cls.__mro__:\n if not issubclass(base, morepath.App):\n continue\n if base is morepath.App:\n continue\n module = '.'.join(base.__module__.split('.')[:2])\n if module.startswith('test'):\n continue\n yield module\n\n\n<mask token>\n\n\ndef get_unique_hstore_keys(session: 'Session', column: 'Column[dict[str, Any]]'\n ) ->set[str]:\n \"\"\" Returns a set of keys found in an hstore column over all records\n of its table.\n\n \"\"\"\n base = session.query(column.keys()).with_entities(sqlalchemy.func.skeys\n (column).label('keys'))\n query = sqlalchemy.select([sqlalchemy.func.array_agg(sqlalchemy.column(\n 'keys'))], distinct=True).select_from(base.subquery())\n keys = session.execute(query).scalar()\n return set(keys) if keys else set()\n\n\ndef makeopendir(fs: 'FS', directory: str) ->'SubFS[FS]':\n \"\"\" Creates and opens the given directory in the given PyFilesystem. \"\"\"\n if not fs.isdir(directory):\n fs.makedir(directory)\n return fs.opendir(directory)\n\n\ndef append_query_param(url: str, key: str, value: str) ->str:\n \"\"\" Appends a single query parameter to an url. This is faster than\n using Purl, if and only if we only add one query param.\n\n Also this function assumes that the value is already url encoded.\n\n \"\"\"\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)\n\n\nclass PostThread(Thread):\n \"\"\" POSTs the given data with the headers to the URL.\n\n Example::\n\n data = {'a': 1, 'b': 2}\n data = json.dumps(data).encode('utf-8')\n PostThread(\n 'https://example.com/post',\n data,\n (\n ('Content-Type', 'application/json; charset=utf-8'),\n ('Content-Length', len(data))\n )\n ).start()\n\n This only works for external URLs! 
If posting to server itself is\n needed, use a process instead of the thread!\n\n \"\"\"\n\n def __init__(self, url: str, data: bytes, headers:\n 'Collection[tuple[str, str]]', timeout: float=30):\n Thread.__init__(self)\n self.url = url\n self.data = data\n self.headers = headers\n self.timeout = timeout\n\n def run(self) ->None:\n try:\n if not self.url.lower().startswith('http'):\n raise ValueError from None\n request = urllib.request.Request(self.url)\n for header in self.headers:\n request.add_header(header[0], header[1])\n urllib.request.urlopen(request, self.data, self.timeout)\n except Exception as e:\n log.error('Error while sending a POST request to {}: {}'.format\n (self.url, str(e)))\n\n\n<mask token>\n\n\ndef dictionary_to_binary(dictionary: 'LaxFileDict') ->bytes:\n \"\"\" Takes a dictionary created by :func:`binary_to_dictionary` and returns\n the original binary data.\n\n \"\"\"\n data = base64.b64decode(dictionary['data'])\n with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:\n return f.read()\n\n\n@overload\ndef safe_format(format: str, dictionary: dict[str, str | int | float],\n types: None=..., adapt: 'Callable[[str], str] | None'=...,\n raise_on_missing: bool=...) ->str:\n ...\n\n\n<mask token>\n\n\ndef is_valid_yubikey(client_id: str, secret_key: str, expected_yubikey_id:\n str, yubikey: str) ->bool:\n \"\"\" Asks the yubico validation servers if the given yubikey OTP is valid.\n\n :client_id:\n The yubico API client id.\n\n :secret_key:\n The yubico API secret key.\n\n :expected_yubikey_id:\n The expected yubikey id. The yubikey id is defined as the first twelve\n characters of any yubikey value. Each user should have a yubikey\n associated with it's account. If the yubikey value comes from a\n different key, the key is invalid.\n\n :yubikey:\n The actual yubikey value that should be verified.\n\n :return: True if yubico confirmed the validity of the key.\n\n \"\"\"\n assert client_id and secret_key and expected_yubikey_id and yubikey\n assert len(expected_yubikey_id) == 12\n if not yubikey.startswith(expected_yubikey_id):\n return False\n try:\n return Yubico(client_id, secret_key).verify(yubikey)\n except StatusCodeError as e:\n if e.status_code != 'REPLAYED_OTP':\n raise e\n return False\n except SignatureVerificationError:\n return False\n\n\n<mask token>\n\n\ndef yubikey_otp_to_serial(otp: str) ->(int | None):\n \"\"\" Takes a Yubikey OTP and calculates the serial number of the key.\n\n The serial key is printed on the yubikey, in decimal and as a QR code.\n\n Example:\n\n >>> yubikey_otp_to_serial(\n 'ccccccdefghdefghdefghdefghdefghdefghdefghklv')\n 2311522\n\n Adapted from Java:\n\n https://github.com/Yubico/yubikey-salesforce-client/blob/\n e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls\n\n If the key cannot be calculated, None is returned. This can happen if\n they key is malformed.\n\n \"\"\"\n if not is_valid_yubikey_format(otp):\n return None\n token = 'cccc' + otp[:12]\n toggle = False\n keep = 0\n bytesarray = []\n for char in token:\n n = ALPHABET.index(char)\n toggle = not toggle\n if toggle:\n keep = n\n else:\n bytesarray.append(keep << 4 | n)\n value = 0\n mask_value = 31\n for i in range(0, 8):\n shift = (4 - 1 - i) * 8\n value += (bytesarray[i] & 255) << (shift & mask_value)\n return value\n\n\n<mask token>\n\n\ndef dict_path(dictionary: dict[str, _T], path: str) ->_T:\n \"\"\" Gets the value of the given dictionary at the given path. 
For example:\n\n >>> data = {'foo': {'bar': True}}\n >>> dict_path(data, 'foo.bar')\n True\n\n \"\"\"\n if not dictionary:\n raise KeyError()\n return reduce(operator.getitem, path.split('.'), dictionary)\n\n\n<mask token>\n\n\n@overload\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'type[list]') ->'Iterator[list[_T]]':\n ...\n\n\n@overload\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'Callable[[Iterator[_T]], Collection[_T]]') ->'Iterator[Collection[_T]]':\n ...\n\n\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'Callable[[Iterator[_T]], Collection[_T]]'=tuple\n ) ->'Iterator[Collection[_T]]':\n \"\"\" Splits an iterable into batches of batch_size and puts them\n inside a given collection (tuple by default).\n\n The container_factory is necessary in order to consume the iterator\n returned by islice. Otherwise this function would never return.\n\n \"\"\"\n iterator = iter(iterable)\n while True:\n batch = container_factory(islice(iterator, batch_size))\n if len(batch) == 0:\n return\n yield batch\n",
"step-3": "<mask token>\n\n\n@contextmanager\ndef local_lock(namespace: str, key: str) ->'Iterator[None]':\n \"\"\" Locks the given namespace/key combination on the current system,\n automatically freeing it after the with statement has been completed or\n once the process is killed.\n\n Usage::\n\n with lock('namespace', 'key'):\n pass\n\n \"\"\"\n name = f'{namespace}-{key}'.replace('/', '-')\n with open(f'/tmp/{name}', 'w+') as f:\n try:\n fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n yield\n fcntl.flock(f, fcntl.LOCK_UN)\n except BlockingIOError as exception:\n raise AlreadyLockedError from exception\n\n\ndef normalize_for_url(text: str) ->str:\n \"\"\" Takes the given text and makes it fit to be used for an url.\n\n That means replacing spaces and other unwanted characters with '-',\n lowercasing everything and turning unicode characters into their closest\n ascii equivalent using Unidecode.\n\n See https://pypi.python.org/pypi/Unidecode\n\n \"\"\"\n text = text.replace('ü', 'ue')\n text = text.replace('ä', 'ae')\n text = text.replace('ö', 'oe')\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n return clean\n\n\ndef increment_name(name: str) ->str:\n \"\"\" Takes the given name and adds a numbered suffix beginning at 1.\n\n For example::\n\n foo => foo-1\n foo-1 => foo-2\n\n \"\"\"\n match = _number_suffix.search(name)\n if match:\n number_str = match.group(1)\n next_number = int(number_str) + 1\n return f'{name[:-len(number_str)]}{next_number}'\n else:\n return f'{name}-1'\n\n\ndef remove_repeated_spaces(text: str) ->str:\n \"\"\" Removes repeated spaces in the text ('a b' -> 'a b'). \"\"\"\n return _repeated_spaces.sub(' ', text)\n\n\n@contextmanager\ndef profile(filename: str) ->'Iterator[None]':\n \"\"\" Profiles the wrapped code and stores the result in the profiles folder\n with the given filename.\n\n \"\"\"\n profiler = Profile()\n profiler.enable()\n yield\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))\n\n\n@contextmanager\ndef timing(name: (str | None)=None) ->'Iterator[None]':\n \"\"\" Runs the wrapped code and prints the time in ms it took to run it.\n The name is printed in front of the time, if given.\n\n \"\"\"\n start = perf_counter()\n yield\n duration_ms = 1000.0 * (perf_counter() - start)\n if name:\n print(f'{name}: {duration_ms:.0f} ms')\n else:\n print(f'{duration_ms:.0f} ms')\n\n\n<mask token>\n\n\ndef module_path(module: 'ModuleType | str', subpath: str) ->str:\n \"\"\" Returns a subdirectory in the given python module.\n\n :mod:\n A python module (actual module or string)\n\n :subpath:\n Subpath below that python module. 
Leading slashes ('/') are ignored.\n \"\"\"\n parent = module_path_root(module)\n path = os.path.join(parent, subpath.strip('/'))\n assert is_subpath(parent, path)\n return path\n\n\n<mask token>\n\n\nclass Bunch:\n \"\"\" A simple but handy \"collector of a bunch of named stuff\" class.\n\n See `<https://code.activestate.com/recipes/ 52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.\n\n For example::\n\n point = Bunch(x=1, y=2)\n assert point.x == 1\n assert point.y == 2\n\n point.z = 3\n assert point.z == 3\n\n Allows the creation of simple nested bunches, for example::\n\n request = Bunch(**{'app.settings.org.my_setting': True})\n assert request.app.settings.org.my_setting is True\n\n \"\"\"\n\n def __init__(self, **kwargs: Any):\n self.__dict__.update((key, value) for key, value in kwargs.items() if\n '.' not in key)\n for key, value in kwargs.items():\n if '.' in key:\n name, _, key = key.partition('.')\n setattr(self, name, Bunch(**{key: value}))\n if TYPE_CHECKING:\n\n def __getattr__(self, name: str) ->Any:\n ...\n\n def __setattr__(self, name: str, value: Any) ->None:\n ...\n\n def __delattr__(self, name: str) ->None:\n ...\n\n def __eq__(self, other: object) ->bool:\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n\n def __ne__(self, other: object) ->bool:\n return not self.__eq__(other)\n\n\n<mask token>\n\n\ndef hash_dictionary(dictionary: dict[str, Any]) ->str:\n \"\"\" Computes a sha256 hash for the given dictionary. The dictionary\n is expected to only contain values that can be serialized by json.\n\n That includes int, decimal, string, boolean.\n\n Note that this function is not meant to be used for hashing secrets. Do\n not include data in this dictionary that is secret!\n\n \"\"\"\n dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')\n return hashlib.new('sha1', dict_as_string, usedforsecurity=False\n ).hexdigest()\n\n\n@overload\ndef groupbylist(iterable: Iterable[_T], key: None=...) ->list[tuple[_T,\n list[_T]]]:\n ...\n\n\n<mask token>\n\n\ndef groupbylist(iterable: Iterable[_T], key: 'Callable[[_T], Any] | None'=None\n ) ->list[tuple[Any, list[_T]]]:\n \"\"\" Works just like Python's ``itertools.groupby`` function, but instead\n of returning generators, it returns lists.\n\n \"\"\"\n return [(k, list(g)) for k, g in groupby(iterable, key=key)]\n\n\ndef linkify_phone(text: str) ->str:\n \"\"\" Takes a string and replaces valid phone numbers with html links. If a\n phone number is matched, it will be replaced by the result of a callback\n function, that does further checks on the regex match. 
If these checks do\n not pass, the matched number will remain unchanged.\n\n \"\"\"\n\n def strip_whitespace(number: str) ->str:\n return re.sub('\\\\s', '', number)\n\n def is_valid_length(number: str) ->bool:\n if number.startswith('+00'):\n return False\n if number.startswith('00'):\n return len(number) == 13\n elif number.startswith('0'):\n return len(number) == 10\n elif number.startswith('+'):\n return len(number) == 12\n return False\n\n def handle_match(match: 'Match[str]') ->str:\n inside_html = match.group(1)\n number = f'{match.group(2)}{match.group(3)}'\n assert not number.endswith('\\n')\n if inside_html:\n return match.group(0)\n if is_valid_length(strip_whitespace(number)):\n number = remove_repeated_spaces(number).strip()\n return f'<a href=\"tel:{number}\">{number}</a> '\n return match.group(0)\n return _phone_ch_html_safe.sub(handle_match, text)\n\n\ndef linkify(text: str, escape: bool=True) ->str:\n \"\"\" Takes plain text and injects html links for urls and email addresses.\n\n By default the text is html escaped before it is linkified. This accounts\n for the fact that we usually use this for text blocks that we mean to\n extend with email addresses and urls.\n\n If html is already possible, why linkify it?\n\n Note: We need to clean the html after we've created it (linkify\n parses escaped html and turns it into real html). As a consequence it\n is possible to have html urls in the text that won't be escaped.\n\n \"\"\"\n if not text:\n return text\n long_top_level_domains = ['.agency']\n if any(domain in text for domain in long_top_level_domains):\n if '@' in text:\n linkified = str(Markup('<a href=\"mailto:{text}\">{text}</a>').\n format(text=text))\n else:\n linkified = str(Markup('<a href=\"{text}\">{text}</a>').format(\n text=text))\n else:\n linkified = linkify_phone(bleach.linkify(text, parse_email=True))\n if not escape:\n return linkified\n return bleach.clean(linkified, tags=['a'], attributes={'a': ['href',\n 'rel']}, protocols=['http', 'https', 'mailto', 'tel'])\n\n\ndef paragraphify(text: str) ->str:\n \"\"\" Takes a text with newlines groups them into paragraphs according to the\n following rules:\n\n If there's a single newline between two lines, a <br> will replace that\n newline.\n\n If there are multiple newlines between two lines, each line will become\n a paragraph and the extra newlines are discarded.\n\n \"\"\"\n text = text and text.replace('\\r', '').strip('\\n')\n if not text:\n return ''\n return ''.join(f'<p>{p}</p>' for p in (p.replace('\\n', '<br>') for p in\n _multiple_newlines.split(text)))\n\n\ndef to_html_ul(value: str, convert_dashes: bool=True, with_title: bool=False\n ) ->str:\n \"\"\" Linkify and convert to text to one or multiple ul's or paragraphs.\n \"\"\"\n if not value:\n return ''\n value = value.replace('\\r', '').strip('\\n')\n value = value.replace('\\n\\n', '\\n \\n')\n if not convert_dashes:\n return '<p>{}</p>'.format('<br>'.join(linkify(value).splitlines()))\n elements = []\n temp: list[str] = []\n\n def ul(inner: str) ->str:\n return f'<ul class=\"bulleted\">{inner}</ul>'\n\n def li(inner: str) ->str:\n return f'<li>{inner}</li>'\n\n def p(inner: str) ->str:\n return f'<p>{inner}</p>'\n was_list = False\n for i, line in enumerate(value.splitlines()):\n if not line:\n continue\n line = linkify(line)\n is_list = line.startswith('-')\n new_p_or_ul = True if line == ' ' else False\n line = line.lstrip('-').strip()\n if with_title:\n elements.append(p(f'<span class=\"title\">{line}</span>'))\n with_title = False\n else:\n 
if new_p_or_ul or was_list != is_list and i > 0:\n elements.append(ul(''.join(temp)) if was_list else p('<br>'\n .join(temp)))\n temp = []\n was_list = False\n if not new_p_or_ul:\n temp.append(li(line) if is_list else line)\n new_p_or_ul = False\n was_list = is_list\n if temp:\n elements.append(ul(''.join(temp)) if was_list else p('<br>'.join(temp))\n )\n return ''.join(elements)\n\n\ndef ensure_scheme(url: str, default: str='http') ->str:\n \"\"\" Makes sure that the given url has a scheme in front, if none\n was provided.\n\n \"\"\"\n if not url:\n return url\n if '//' not in url:\n url = '//' + url\n _url = URL(url)\n if _url.scheme():\n return url\n return _url.scheme(default).as_string()\n\n\ndef is_uuid(value: (str | UUID)) ->bool:\n \"\"\" Returns true if the given value is a uuid. The value may be a string\n or of type UUID. If it's a string, the uuid is checked with a regex.\n \"\"\"\n if isinstance(value, str):\n return _uuid.match(str(value)) and True or False\n return isinstance(value, UUID)\n\n\n<mask token>\n\n\ndef relative_url(absolute_url: (str | None)) ->str:\n \"\"\" Removes everything in front of the path, including scheme, host,\n username, password and port.\n\n \"\"\"\n url = URL._mutate(URL(absolute_url), scheme=None, username=None,\n password=None, host=None, port=None)\n return url.as_string()\n\n\ndef is_subpath(directory: str, path: str) ->bool:\n \"\"\" Returns true if the given path is inside the given directory. \"\"\"\n directory = os.path.join(os.path.realpath(directory), '')\n path = os.path.realpath(path)\n return os.path.commonprefix([path, directory]) == directory\n\n\n@overload\ndef is_sorted(iterable: 'Iterable[SupportsRichComparison]', key:\n 'Callable[[SupportsRichComparison], SupportsRichComparison]'=...,\n reverse: bool=...) ->bool:\n ...\n\n\n@overload\ndef is_sorted(iterable: 'Iterable[_T]', key:\n 'Callable[[_T], SupportsRichComparison]', reverse: bool=...) ->bool:\n ...\n\n\n<mask token>\n\n\ndef morepath_modules(cls: type[morepath.App]) ->'Iterator[str]':\n \"\"\" Returns all morepath modules which should be scanned for the given\n morepath application class.\n\n We can't reliably know the actual morepath modules that\n need to be scanned, which is why we assume that each module has\n one namespace (like 'more.transaction' or 'onegov.core').\n\n \"\"\"\n for base in cls.__mro__:\n if not issubclass(base, morepath.App):\n continue\n if base is morepath.App:\n continue\n module = '.'.join(base.__module__.split('.')[:2])\n if module.startswith('test'):\n continue\n yield module\n\n\ndef scan_morepath_modules(cls: type[morepath.App]) ->None:\n \"\"\" Tries to scan all the morepath modules required for the given\n application class. 
This is not guaranteed to stay reliable as there is\n no sure way to discover all modules required by the application class.\n\n \"\"\"\n for module in sorted(morepath_modules(cls)):\n morepath.scan(import_module(module))\n\n\ndef get_unique_hstore_keys(session: 'Session', column: 'Column[dict[str, Any]]'\n ) ->set[str]:\n \"\"\" Returns a set of keys found in an hstore column over all records\n of its table.\n\n \"\"\"\n base = session.query(column.keys()).with_entities(sqlalchemy.func.skeys\n (column).label('keys'))\n query = sqlalchemy.select([sqlalchemy.func.array_agg(sqlalchemy.column(\n 'keys'))], distinct=True).select_from(base.subquery())\n keys = session.execute(query).scalar()\n return set(keys) if keys else set()\n\n\ndef makeopendir(fs: 'FS', directory: str) ->'SubFS[FS]':\n \"\"\" Creates and opens the given directory in the given PyFilesystem. \"\"\"\n if not fs.isdir(directory):\n fs.makedir(directory)\n return fs.opendir(directory)\n\n\ndef append_query_param(url: str, key: str, value: str) ->str:\n \"\"\" Appends a single query parameter to an url. This is faster than\n using Purl, if and only if we only add one query param.\n\n Also this function assumes that the value is already url encoded.\n\n \"\"\"\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)\n\n\nclass PostThread(Thread):\n \"\"\" POSTs the given data with the headers to the URL.\n\n Example::\n\n data = {'a': 1, 'b': 2}\n data = json.dumps(data).encode('utf-8')\n PostThread(\n 'https://example.com/post',\n data,\n (\n ('Content-Type', 'application/json; charset=utf-8'),\n ('Content-Length', len(data))\n )\n ).start()\n\n This only works for external URLs! If posting to server itself is\n needed, use a process instead of the thread!\n\n \"\"\"\n\n def __init__(self, url: str, data: bytes, headers:\n 'Collection[tuple[str, str]]', timeout: float=30):\n Thread.__init__(self)\n self.url = url\n self.data = data\n self.headers = headers\n self.timeout = timeout\n\n def run(self) ->None:\n try:\n if not self.url.lower().startswith('http'):\n raise ValueError from None\n request = urllib.request.Request(self.url)\n for header in self.headers:\n request.add_header(header[0], header[1])\n urllib.request.urlopen(request, self.data, self.timeout)\n except Exception as e:\n log.error('Error while sending a POST request to {}: {}'.format\n (self.url, str(e)))\n\n\n<mask token>\n\n\ndef dictionary_to_binary(dictionary: 'LaxFileDict') ->bytes:\n \"\"\" Takes a dictionary created by :func:`binary_to_dictionary` and returns\n the original binary data.\n\n \"\"\"\n data = base64.b64decode(dictionary['data'])\n with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:\n return f.read()\n\n\n@overload\ndef safe_format(format: str, dictionary: dict[str, str | int | float],\n types: None=..., adapt: 'Callable[[str], str] | None'=...,\n raise_on_missing: bool=...) ->str:\n ...\n\n\n@overload\ndef safe_format(format: str, dictionary: dict[str, _T], types: set[type[_T]\n ]=..., adapt: 'Callable[[str], str] | None'=..., raise_on_missing: bool=...\n ) ->str:\n ...\n\n\n<mask token>\n\n\ndef safe_format_keys(format: str, adapt: 'Callable[[str], str] | None'=None\n ) ->list[str]:\n \"\"\" Takes a :func:`safe_format` string and returns the found keys. 
\"\"\"\n keys = []\n\n def adapt_and_record(key: str) ->str:\n key = adapt(key) if adapt else key\n keys.append(key)\n return key\n safe_format(format, {}, adapt=adapt_and_record)\n return keys\n\n\ndef is_valid_yubikey(client_id: str, secret_key: str, expected_yubikey_id:\n str, yubikey: str) ->bool:\n \"\"\" Asks the yubico validation servers if the given yubikey OTP is valid.\n\n :client_id:\n The yubico API client id.\n\n :secret_key:\n The yubico API secret key.\n\n :expected_yubikey_id:\n The expected yubikey id. The yubikey id is defined as the first twelve\n characters of any yubikey value. Each user should have a yubikey\n associated with it's account. If the yubikey value comes from a\n different key, the key is invalid.\n\n :yubikey:\n The actual yubikey value that should be verified.\n\n :return: True if yubico confirmed the validity of the key.\n\n \"\"\"\n assert client_id and secret_key and expected_yubikey_id and yubikey\n assert len(expected_yubikey_id) == 12\n if not yubikey.startswith(expected_yubikey_id):\n return False\n try:\n return Yubico(client_id, secret_key).verify(yubikey)\n except StatusCodeError as e:\n if e.status_code != 'REPLAYED_OTP':\n raise e\n return False\n except SignatureVerificationError:\n return False\n\n\ndef is_valid_yubikey_format(otp: str) ->bool:\n \"\"\" Returns True if the given OTP has the correct format. Does not actually\n contact Yubico, so this function may return true, for some invalid keys.\n\n \"\"\"\n return ALPHABET_RE.match(otp) and True or False\n\n\ndef yubikey_otp_to_serial(otp: str) ->(int | None):\n \"\"\" Takes a Yubikey OTP and calculates the serial number of the key.\n\n The serial key is printed on the yubikey, in decimal and as a QR code.\n\n Example:\n\n >>> yubikey_otp_to_serial(\n 'ccccccdefghdefghdefghdefghdefghdefghdefghklv')\n 2311522\n\n Adapted from Java:\n\n https://github.com/Yubico/yubikey-salesforce-client/blob/\n e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls\n\n If the key cannot be calculated, None is returned. This can happen if\n they key is malformed.\n\n \"\"\"\n if not is_valid_yubikey_format(otp):\n return None\n token = 'cccc' + otp[:12]\n toggle = False\n keep = 0\n bytesarray = []\n for char in token:\n n = ALPHABET.index(char)\n toggle = not toggle\n if toggle:\n keep = n\n else:\n bytesarray.append(keep << 4 | n)\n value = 0\n mask_value = 31\n for i in range(0, 8):\n shift = (4 - 1 - i) * 8\n value += (bytesarray[i] & 255) << (shift & mask_value)\n return value\n\n\n<mask token>\n\n\ndef dict_path(dictionary: dict[str, _T], path: str) ->_T:\n \"\"\" Gets the value of the given dictionary at the given path. For example:\n\n >>> data = {'foo': {'bar': True}}\n >>> dict_path(data, 'foo.bar')\n True\n\n \"\"\"\n if not dictionary:\n raise KeyError()\n return reduce(operator.getitem, path.split('.'), dictionary)\n\n\ndef safe_move(src: str, dst: str) ->None:\n \"\"\" Rename a file from ``src`` to ``dst``.\n\n * Moves must be atomic. ``shutil.move()`` is not atomic.\n\n * Moves must work across filesystems. Often temp directories and the\n cache directories live on different filesystems. 
``os.rename()`` can\n throw errors if run across filesystems.\n\n So we try ``os.rename()``, but if we detect a cross-filesystem copy, we\n switch to ``shutil.move()`` with some wrappers to make it atomic.\n\n Via https://alexwlchan.net/2019/03/atomic-cross-filesystem-moves-in-python\n\n \"\"\"\n try:\n os.rename(src, dst)\n except OSError as err:\n if err.errno == errno.EXDEV:\n copy_id = uuid4()\n tmp_dst = '%s.%s.tmp' % (dst, copy_id)\n shutil.copyfile(src, tmp_dst)\n os.rename(tmp_dst, dst)\n os.unlink(src)\n else:\n raise\n\n\n<mask token>\n\n\n@overload\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'type[list]') ->'Iterator[list[_T]]':\n ...\n\n\n@overload\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'Callable[[Iterator[_T]], Collection[_T]]') ->'Iterator[Collection[_T]]':\n ...\n\n\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'Callable[[Iterator[_T]], Collection[_T]]'=tuple\n ) ->'Iterator[Collection[_T]]':\n \"\"\" Splits an iterable into batches of batch_size and puts them\n inside a given collection (tuple by default).\n\n The container_factory is necessary in order to consume the iterator\n returned by islice. Otherwise this function would never return.\n\n \"\"\"\n iterator = iter(iterable)\n while True:\n batch = container_factory(islice(iterator, batch_size))\n if len(batch) == 0:\n return\n yield batch\n",
"step-4": "<mask token>\n\n\n@contextmanager\ndef local_lock(namespace: str, key: str) ->'Iterator[None]':\n \"\"\" Locks the given namespace/key combination on the current system,\n automatically freeing it after the with statement has been completed or\n once the process is killed.\n\n Usage::\n\n with lock('namespace', 'key'):\n pass\n\n \"\"\"\n name = f'{namespace}-{key}'.replace('/', '-')\n with open(f'/tmp/{name}', 'w+') as f:\n try:\n fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n yield\n fcntl.flock(f, fcntl.LOCK_UN)\n except BlockingIOError as exception:\n raise AlreadyLockedError from exception\n\n\ndef normalize_for_url(text: str) ->str:\n \"\"\" Takes the given text and makes it fit to be used for an url.\n\n That means replacing spaces and other unwanted characters with '-',\n lowercasing everything and turning unicode characters into their closest\n ascii equivalent using Unidecode.\n\n See https://pypi.python.org/pypi/Unidecode\n\n \"\"\"\n text = text.replace('ü', 'ue')\n text = text.replace('ä', 'ae')\n text = text.replace('ö', 'oe')\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n return clean\n\n\ndef increment_name(name: str) ->str:\n \"\"\" Takes the given name and adds a numbered suffix beginning at 1.\n\n For example::\n\n foo => foo-1\n foo-1 => foo-2\n\n \"\"\"\n match = _number_suffix.search(name)\n if match:\n number_str = match.group(1)\n next_number = int(number_str) + 1\n return f'{name[:-len(number_str)]}{next_number}'\n else:\n return f'{name}-1'\n\n\ndef remove_repeated_spaces(text: str) ->str:\n \"\"\" Removes repeated spaces in the text ('a b' -> 'a b'). \"\"\"\n return _repeated_spaces.sub(' ', text)\n\n\n@contextmanager\ndef profile(filename: str) ->'Iterator[None]':\n \"\"\" Profiles the wrapped code and stores the result in the profiles folder\n with the given filename.\n\n \"\"\"\n profiler = Profile()\n profiler.enable()\n yield\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))\n\n\n@contextmanager\ndef timing(name: (str | None)=None) ->'Iterator[None]':\n \"\"\" Runs the wrapped code and prints the time in ms it took to run it.\n The name is printed in front of the time, if given.\n\n \"\"\"\n start = perf_counter()\n yield\n duration_ms = 1000.0 * (perf_counter() - start)\n if name:\n print(f'{name}: {duration_ms:.0f} ms')\n else:\n print(f'{duration_ms:.0f} ms')\n\n\n<mask token>\n\n\ndef module_path(module: 'ModuleType | str', subpath: str) ->str:\n \"\"\" Returns a subdirectory in the given python module.\n\n :mod:\n A python module (actual module or string)\n\n :subpath:\n Subpath below that python module. Leading slashes ('/') are ignored.\n \"\"\"\n parent = module_path_root(module)\n path = os.path.join(parent, subpath.strip('/'))\n assert is_subpath(parent, path)\n return path\n\n\ndef touch(file_path: str) ->None:\n \"\"\" Touches the file on the given path. 
\"\"\"\n try:\n os.utime(file_path, None)\n except Exception:\n open(file_path, 'a').close()\n\n\nclass Bunch:\n \"\"\" A simple but handy \"collector of a bunch of named stuff\" class.\n\n See `<https://code.activestate.com/recipes/ 52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.\n\n For example::\n\n point = Bunch(x=1, y=2)\n assert point.x == 1\n assert point.y == 2\n\n point.z = 3\n assert point.z == 3\n\n Allows the creation of simple nested bunches, for example::\n\n request = Bunch(**{'app.settings.org.my_setting': True})\n assert request.app.settings.org.my_setting is True\n\n \"\"\"\n\n def __init__(self, **kwargs: Any):\n self.__dict__.update((key, value) for key, value in kwargs.items() if\n '.' not in key)\n for key, value in kwargs.items():\n if '.' in key:\n name, _, key = key.partition('.')\n setattr(self, name, Bunch(**{key: value}))\n if TYPE_CHECKING:\n\n def __getattr__(self, name: str) ->Any:\n ...\n\n def __setattr__(self, name: str, value: Any) ->None:\n ...\n\n def __delattr__(self, name: str) ->None:\n ...\n\n def __eq__(self, other: object) ->bool:\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n\n def __ne__(self, other: object) ->bool:\n return not self.__eq__(other)\n\n\n<mask token>\n\n\ndef hash_dictionary(dictionary: dict[str, Any]) ->str:\n \"\"\" Computes a sha256 hash for the given dictionary. The dictionary\n is expected to only contain values that can be serialized by json.\n\n That includes int, decimal, string, boolean.\n\n Note that this function is not meant to be used for hashing secrets. Do\n not include data in this dictionary that is secret!\n\n \"\"\"\n dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')\n return hashlib.new('sha1', dict_as_string, usedforsecurity=False\n ).hexdigest()\n\n\n@overload\ndef groupbylist(iterable: Iterable[_T], key: None=...) ->list[tuple[_T,\n list[_T]]]:\n ...\n\n\n@overload\ndef groupbylist(iterable: Iterable[_T], key: 'Callable[[_T], _KT]') ->list[\n tuple[_KT, list[_T]]]:\n ...\n\n\ndef groupbylist(iterable: Iterable[_T], key: 'Callable[[_T], Any] | None'=None\n ) ->list[tuple[Any, list[_T]]]:\n \"\"\" Works just like Python's ``itertools.groupby`` function, but instead\n of returning generators, it returns lists.\n\n \"\"\"\n return [(k, list(g)) for k, g in groupby(iterable, key=key)]\n\n\ndef linkify_phone(text: str) ->str:\n \"\"\" Takes a string and replaces valid phone numbers with html links. If a\n phone number is matched, it will be replaced by the result of a callback\n function, that does further checks on the regex match. 
If these checks do\n not pass, the matched number will remain unchanged.\n\n \"\"\"\n\n def strip_whitespace(number: str) ->str:\n return re.sub('\\\\s', '', number)\n\n def is_valid_length(number: str) ->bool:\n if number.startswith('+00'):\n return False\n if number.startswith('00'):\n return len(number) == 13\n elif number.startswith('0'):\n return len(number) == 10\n elif number.startswith('+'):\n return len(number) == 12\n return False\n\n def handle_match(match: 'Match[str]') ->str:\n inside_html = match.group(1)\n number = f'{match.group(2)}{match.group(3)}'\n assert not number.endswith('\\n')\n if inside_html:\n return match.group(0)\n if is_valid_length(strip_whitespace(number)):\n number = remove_repeated_spaces(number).strip()\n return f'<a href=\"tel:{number}\">{number}</a> '\n return match.group(0)\n return _phone_ch_html_safe.sub(handle_match, text)\n\n\ndef linkify(text: str, escape: bool=True) ->str:\n \"\"\" Takes plain text and injects html links for urls and email addresses.\n\n By default the text is html escaped before it is linkified. This accounts\n for the fact that we usually use this for text blocks that we mean to\n extend with email addresses and urls.\n\n If html is already possible, why linkify it?\n\n Note: We need to clean the html after we've created it (linkify\n parses escaped html and turns it into real html). As a consequence it\n is possible to have html urls in the text that won't be escaped.\n\n \"\"\"\n if not text:\n return text\n long_top_level_domains = ['.agency']\n if any(domain in text for domain in long_top_level_domains):\n if '@' in text:\n linkified = str(Markup('<a href=\"mailto:{text}\">{text}</a>').\n format(text=text))\n else:\n linkified = str(Markup('<a href=\"{text}\">{text}</a>').format(\n text=text))\n else:\n linkified = linkify_phone(bleach.linkify(text, parse_email=True))\n if not escape:\n return linkified\n return bleach.clean(linkified, tags=['a'], attributes={'a': ['href',\n 'rel']}, protocols=['http', 'https', 'mailto', 'tel'])\n\n\ndef paragraphify(text: str) ->str:\n \"\"\" Takes a text with newlines groups them into paragraphs according to the\n following rules:\n\n If there's a single newline between two lines, a <br> will replace that\n newline.\n\n If there are multiple newlines between two lines, each line will become\n a paragraph and the extra newlines are discarded.\n\n \"\"\"\n text = text and text.replace('\\r', '').strip('\\n')\n if not text:\n return ''\n return ''.join(f'<p>{p}</p>' for p in (p.replace('\\n', '<br>') for p in\n _multiple_newlines.split(text)))\n\n\ndef to_html_ul(value: str, convert_dashes: bool=True, with_title: bool=False\n ) ->str:\n \"\"\" Linkify and convert to text to one or multiple ul's or paragraphs.\n \"\"\"\n if not value:\n return ''\n value = value.replace('\\r', '').strip('\\n')\n value = value.replace('\\n\\n', '\\n \\n')\n if not convert_dashes:\n return '<p>{}</p>'.format('<br>'.join(linkify(value).splitlines()))\n elements = []\n temp: list[str] = []\n\n def ul(inner: str) ->str:\n return f'<ul class=\"bulleted\">{inner}</ul>'\n\n def li(inner: str) ->str:\n return f'<li>{inner}</li>'\n\n def p(inner: str) ->str:\n return f'<p>{inner}</p>'\n was_list = False\n for i, line in enumerate(value.splitlines()):\n if not line:\n continue\n line = linkify(line)\n is_list = line.startswith('-')\n new_p_or_ul = True if line == ' ' else False\n line = line.lstrip('-').strip()\n if with_title:\n elements.append(p(f'<span class=\"title\">{line}</span>'))\n with_title = False\n else:\n 
if new_p_or_ul or was_list != is_list and i > 0:\n elements.append(ul(''.join(temp)) if was_list else p('<br>'\n .join(temp)))\n temp = []\n was_list = False\n if not new_p_or_ul:\n temp.append(li(line) if is_list else line)\n new_p_or_ul = False\n was_list = is_list\n if temp:\n elements.append(ul(''.join(temp)) if was_list else p('<br>'.join(temp))\n )\n return ''.join(elements)\n\n\ndef ensure_scheme(url: str, default: str='http') ->str:\n \"\"\" Makes sure that the given url has a scheme in front, if none\n was provided.\n\n \"\"\"\n if not url:\n return url\n if '//' not in url:\n url = '//' + url\n _url = URL(url)\n if _url.scheme():\n return url\n return _url.scheme(default).as_string()\n\n\ndef is_uuid(value: (str | UUID)) ->bool:\n \"\"\" Returns true if the given value is a uuid. The value may be a string\n or of type UUID. If it's a string, the uuid is checked with a regex.\n \"\"\"\n if isinstance(value, str):\n return _uuid.match(str(value)) and True or False\n return isinstance(value, UUID)\n\n\n<mask token>\n\n\ndef relative_url(absolute_url: (str | None)) ->str:\n \"\"\" Removes everything in front of the path, including scheme, host,\n username, password and port.\n\n \"\"\"\n url = URL._mutate(URL(absolute_url), scheme=None, username=None,\n password=None, host=None, port=None)\n return url.as_string()\n\n\ndef is_subpath(directory: str, path: str) ->bool:\n \"\"\" Returns true if the given path is inside the given directory. \"\"\"\n directory = os.path.join(os.path.realpath(directory), '')\n path = os.path.realpath(path)\n return os.path.commonprefix([path, directory]) == directory\n\n\n@overload\ndef is_sorted(iterable: 'Iterable[SupportsRichComparison]', key:\n 'Callable[[SupportsRichComparison], SupportsRichComparison]'=...,\n reverse: bool=...) ->bool:\n ...\n\n\n@overload\ndef is_sorted(iterable: 'Iterable[_T]', key:\n 'Callable[[_T], SupportsRichComparison]', reverse: bool=...) ->bool:\n ...\n\n\ndef is_sorted(iterable: 'Iterable[Any]', key:\n 'Callable[[Any], SupportsRichComparison]'=lambda i: i, reverse: bool=False\n ) ->bool:\n \"\"\" Returns True if the iterable is sorted. \"\"\"\n seq = list(iterable)\n for a, b in zip(seq, sorted(seq, key=key, reverse=reverse)):\n if a is not b:\n return False\n return True\n\n\ndef morepath_modules(cls: type[morepath.App]) ->'Iterator[str]':\n \"\"\" Returns all morepath modules which should be scanned for the given\n morepath application class.\n\n We can't reliably know the actual morepath modules that\n need to be scanned, which is why we assume that each module has\n one namespace (like 'more.transaction' or 'onegov.core').\n\n \"\"\"\n for base in cls.__mro__:\n if not issubclass(base, morepath.App):\n continue\n if base is morepath.App:\n continue\n module = '.'.join(base.__module__.split('.')[:2])\n if module.startswith('test'):\n continue\n yield module\n\n\ndef scan_morepath_modules(cls: type[morepath.App]) ->None:\n \"\"\" Tries to scan all the morepath modules required for the given\n application class. 
This is not guaranteed to stay reliable as there is\n no sure way to discover all modules required by the application class.\n\n \"\"\"\n for module in sorted(morepath_modules(cls)):\n morepath.scan(import_module(module))\n\n\ndef get_unique_hstore_keys(session: 'Session', column: 'Column[dict[str, Any]]'\n ) ->set[str]:\n \"\"\" Returns a set of keys found in an hstore column over all records\n of its table.\n\n \"\"\"\n base = session.query(column.keys()).with_entities(sqlalchemy.func.skeys\n (column).label('keys'))\n query = sqlalchemy.select([sqlalchemy.func.array_agg(sqlalchemy.column(\n 'keys'))], distinct=True).select_from(base.subquery())\n keys = session.execute(query).scalar()\n return set(keys) if keys else set()\n\n\ndef makeopendir(fs: 'FS', directory: str) ->'SubFS[FS]':\n \"\"\" Creates and opens the given directory in the given PyFilesystem. \"\"\"\n if not fs.isdir(directory):\n fs.makedir(directory)\n return fs.opendir(directory)\n\n\ndef append_query_param(url: str, key: str, value: str) ->str:\n \"\"\" Appends a single query parameter to an url. This is faster than\n using Purl, if and only if we only add one query param.\n\n Also this function assumes that the value is already url encoded.\n\n \"\"\"\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)\n\n\nclass PostThread(Thread):\n \"\"\" POSTs the given data with the headers to the URL.\n\n Example::\n\n data = {'a': 1, 'b': 2}\n data = json.dumps(data).encode('utf-8')\n PostThread(\n 'https://example.com/post',\n data,\n (\n ('Content-Type', 'application/json; charset=utf-8'),\n ('Content-Length', len(data))\n )\n ).start()\n\n This only works for external URLs! If posting to server itself is\n needed, use a process instead of the thread!\n\n \"\"\"\n\n def __init__(self, url: str, data: bytes, headers:\n 'Collection[tuple[str, str]]', timeout: float=30):\n Thread.__init__(self)\n self.url = url\n self.data = data\n self.headers = headers\n self.timeout = timeout\n\n def run(self) ->None:\n try:\n if not self.url.lower().startswith('http'):\n raise ValueError from None\n request = urllib.request.Request(self.url)\n for header in self.headers:\n request.add_header(header[0], header[1])\n urllib.request.urlopen(request, self.data, self.timeout)\n except Exception as e:\n log.error('Error while sending a POST request to {}: {}'.format\n (self.url, str(e)))\n\n\n<mask token>\n\n\ndef dictionary_to_binary(dictionary: 'LaxFileDict') ->bytes:\n \"\"\" Takes a dictionary created by :func:`binary_to_dictionary` and returns\n the original binary data.\n\n \"\"\"\n data = base64.b64decode(dictionary['data'])\n with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:\n return f.read()\n\n\n@overload\ndef safe_format(format: str, dictionary: dict[str, str | int | float],\n types: None=..., adapt: 'Callable[[str], str] | None'=...,\n raise_on_missing: bool=...) ->str:\n ...\n\n\n@overload\ndef safe_format(format: str, dictionary: dict[str, _T], types: set[type[_T]\n ]=..., adapt: 'Callable[[str], str] | None'=..., raise_on_missing: bool=...\n ) ->str:\n ...\n\n\n<mask token>\n\n\ndef safe_format_keys(format: str, adapt: 'Callable[[str], str] | None'=None\n ) ->list[str]:\n \"\"\" Takes a :func:`safe_format` string and returns the found keys. 
\"\"\"\n keys = []\n\n def adapt_and_record(key: str) ->str:\n key = adapt(key) if adapt else key\n keys.append(key)\n return key\n safe_format(format, {}, adapt=adapt_and_record)\n return keys\n\n\ndef is_valid_yubikey(client_id: str, secret_key: str, expected_yubikey_id:\n str, yubikey: str) ->bool:\n \"\"\" Asks the yubico validation servers if the given yubikey OTP is valid.\n\n :client_id:\n The yubico API client id.\n\n :secret_key:\n The yubico API secret key.\n\n :expected_yubikey_id:\n The expected yubikey id. The yubikey id is defined as the first twelve\n characters of any yubikey value. Each user should have a yubikey\n associated with it's account. If the yubikey value comes from a\n different key, the key is invalid.\n\n :yubikey:\n The actual yubikey value that should be verified.\n\n :return: True if yubico confirmed the validity of the key.\n\n \"\"\"\n assert client_id and secret_key and expected_yubikey_id and yubikey\n assert len(expected_yubikey_id) == 12\n if not yubikey.startswith(expected_yubikey_id):\n return False\n try:\n return Yubico(client_id, secret_key).verify(yubikey)\n except StatusCodeError as e:\n if e.status_code != 'REPLAYED_OTP':\n raise e\n return False\n except SignatureVerificationError:\n return False\n\n\ndef is_valid_yubikey_format(otp: str) ->bool:\n \"\"\" Returns True if the given OTP has the correct format. Does not actually\n contact Yubico, so this function may return true, for some invalid keys.\n\n \"\"\"\n return ALPHABET_RE.match(otp) and True or False\n\n\ndef yubikey_otp_to_serial(otp: str) ->(int | None):\n \"\"\" Takes a Yubikey OTP and calculates the serial number of the key.\n\n The serial key is printed on the yubikey, in decimal and as a QR code.\n\n Example:\n\n >>> yubikey_otp_to_serial(\n 'ccccccdefghdefghdefghdefghdefghdefghdefghklv')\n 2311522\n\n Adapted from Java:\n\n https://github.com/Yubico/yubikey-salesforce-client/blob/\n e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls\n\n If the key cannot be calculated, None is returned. This can happen if\n they key is malformed.\n\n \"\"\"\n if not is_valid_yubikey_format(otp):\n return None\n token = 'cccc' + otp[:12]\n toggle = False\n keep = 0\n bytesarray = []\n for char in token:\n n = ALPHABET.index(char)\n toggle = not toggle\n if toggle:\n keep = n\n else:\n bytesarray.append(keep << 4 | n)\n value = 0\n mask_value = 31\n for i in range(0, 8):\n shift = (4 - 1 - i) * 8\n value += (bytesarray[i] & 255) << (shift & mask_value)\n return value\n\n\n<mask token>\n\n\ndef dict_path(dictionary: dict[str, _T], path: str) ->_T:\n \"\"\" Gets the value of the given dictionary at the given path. For example:\n\n >>> data = {'foo': {'bar': True}}\n >>> dict_path(data, 'foo.bar')\n True\n\n \"\"\"\n if not dictionary:\n raise KeyError()\n return reduce(operator.getitem, path.split('.'), dictionary)\n\n\ndef safe_move(src: str, dst: str) ->None:\n \"\"\" Rename a file from ``src`` to ``dst``.\n\n * Moves must be atomic. ``shutil.move()`` is not atomic.\n\n * Moves must work across filesystems. Often temp directories and the\n cache directories live on different filesystems. 
``os.rename()`` can\n throw errors if run across filesystems.\n\n So we try ``os.rename()``, but if we detect a cross-filesystem copy, we\n switch to ``shutil.move()`` with some wrappers to make it atomic.\n\n Via https://alexwlchan.net/2019/03/atomic-cross-filesystem-moves-in-python\n\n \"\"\"\n try:\n os.rename(src, dst)\n except OSError as err:\n if err.errno == errno.EXDEV:\n copy_id = uuid4()\n tmp_dst = '%s.%s.tmp' % (dst, copy_id)\n shutil.copyfile(src, tmp_dst)\n os.rename(tmp_dst, dst)\n os.unlink(src)\n else:\n raise\n\n\n<mask token>\n\n\n@overload\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'type[list]') ->'Iterator[list[_T]]':\n ...\n\n\n@overload\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'Callable[[Iterator[_T]], Collection[_T]]') ->'Iterator[Collection[_T]]':\n ...\n\n\ndef batched(iterable: Iterable[_T], batch_size: int, container_factory:\n 'Callable[[Iterator[_T]], Collection[_T]]'=tuple\n ) ->'Iterator[Collection[_T]]':\n \"\"\" Splits an iterable into batches of batch_size and puts them\n inside a given collection (tuple by default).\n\n The container_factory is necessary in order to consume the iterator\n returned by islice. Otherwise this function would never return.\n\n \"\"\"\n iterator = iter(iterable)\n while True:\n batch = container_factory(islice(iterator, batch_size))\n if len(batch) == 0:\n return\n yield batch\n",
"step-5": "import base64\nimport bleach\nimport errno\nimport fcntl\nimport gzip\nimport hashlib\nimport importlib\nimport inspect\nimport magic\nimport mimetypes\nimport morepath\nimport operator\nimport os.path\nimport re\nimport shutil\nimport sqlalchemy\nimport urllib.request\n\nfrom markupsafe import Markup\nfrom collections.abc import Iterable\nfrom contextlib import contextmanager\nfrom cProfile import Profile\nfrom functools import reduce\nfrom importlib import import_module\nfrom io import BytesIO, StringIO\nfrom itertools import groupby, islice\nfrom onegov.core import log\nfrom onegov.core.cache import lru_cache\nfrom onegov.core.custom import json\nfrom onegov.core.errors import AlreadyLockedError\nfrom purl import URL\nfrom threading import Thread\nfrom time import perf_counter\nfrom unidecode import unidecode\nfrom uuid import UUID, uuid4\nfrom webob import static\nfrom yubico_client import Yubico\nfrom yubico_client.yubico_exceptions import SignatureVerificationError\nfrom yubico_client.yubico_exceptions import StatusCodeError\n\n\nfrom typing import overload, Any, TypeVar, TYPE_CHECKING\nif TYPE_CHECKING:\n from _typeshed import SupportsRichComparison\n from collections.abc import Callable, Collection, Iterator\n from fs.base import FS, SubFS\n from re import Match\n from sqlalchemy import Column\n from sqlalchemy.orm import Session\n from types import ModuleType\n from webob import Response\n from .request import CoreRequest\n from .types import FileDict, LaxFileDict\n\n\n_T = TypeVar('_T')\n_KT = TypeVar('_KT')\n\n\n# http://stackoverflow.com/a/13500078\n_unwanted_url_chars = re.compile(r'[\\.\\(\\)\\\\/\\s<>\\[\\]{},:;?!@&=+$#@%|\\*\"\\'`]+')\n_double_dash = re.compile(r'[-]+')\n_number_suffix = re.compile(r'-([0-9]+)$')\n_repeated_spaces = re.compile(r'\\s\\s+')\n_uuid = re.compile(\n r'^[a-f0-9]{8}-?[a-f0-9]{4}-?[a-f0-9]{4}-?[a-f0-9]{4}-?[a-f0-9]{12}$')\n\n# only temporary until bleach has a release > 1.4.1 -\n_email_regex = re.compile((\n r\"([a-z0-9!#$%&'*+\\/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+\\/=?^_`\"\n r\"{|}~-]+)*(@|\\sat\\s)(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?(\\.|\"\n r\"\\sdot\\s))+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)\"\n))\n\n# detects multiple successive newlines\n_multiple_newlines = re.compile(r'\\n{2,}', re.MULTILINE)\n\n# detect starting strings of phone inside a link\n_phone_inside_a_tags = r'(\\\">|href=\\\"tel:)?'\n\n# regex pattern for swiss phone numbers\n_phone_ch_country_code = r\"(\\+41|0041|0[0-9]{2})\"\n_phone_ch = re.compile(_phone_ch_country_code + r'([ \\r\\f\\t\\d]+)')\n\n# Adds a regex group to capture if a leading a tag is present or if the\n# number is part of the href attributes\n_phone_ch_html_safe = re.compile(\n _phone_inside_a_tags + _phone_ch_country_code + r'([ \\r\\f\\t\\d]+)')\n\n# for yubikeys\nALPHABET = 'cbdefghijklnrtuv'\nALPHABET_RE = re.compile(r'^[cbdefghijklnrtuv]{12,44}$')\n\n\n@contextmanager\ndef local_lock(namespace: str, key: str) -> 'Iterator[None]':\n \"\"\" Locks the given namespace/key combination on the current system,\n automatically freeing it after the with statement has been completed or\n once the process is killed.\n\n Usage::\n\n with lock('namespace', 'key'):\n pass\n\n \"\"\"\n name = f'{namespace}-{key}'.replace('/', '-')\n\n with open(f'/tmp/{name}', 'w+') as f:\n try:\n fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n yield\n fcntl.flock(f, fcntl.LOCK_UN)\n except BlockingIOError as exception:\n raise AlreadyLockedError from exception\n\n\ndef normalize_for_url(text: str) -> str:\n \"\"\" Takes the given 
text and makes it fit to be used for an url.\n\n That means replacing spaces and other unwanted characters with '-',\n lowercasing everything and turning unicode characters into their closest\n ascii equivalent using Unidecode.\n\n See https://pypi.python.org/pypi/Unidecode\n\n \"\"\"\n\n # German is our main language, so we are extra considerate about it\n # (unidecode turns ü into u)\n text = text.replace(\"ü\", \"ue\")\n text = text.replace(\"ä\", \"ae\")\n text = text.replace(\"ö\", \"oe\")\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n\n return clean\n\n\ndef increment_name(name: str) -> str:\n \"\"\" Takes the given name and adds a numbered suffix beginning at 1.\n\n For example::\n\n foo => foo-1\n foo-1 => foo-2\n\n \"\"\"\n\n match = _number_suffix.search(name)\n if match:\n number_str = match.group(1)\n next_number = int(number_str) + 1\n return f'{name[:-len(number_str)]}{next_number}'\n else:\n return f'{name}-1'\n\n\ndef remove_repeated_spaces(text: str) -> str:\n \"\"\" Removes repeated spaces in the text ('a  b' -> 'a b'). \"\"\"\n\n return _repeated_spaces.sub(' ', text)\n\n\n@contextmanager\ndef profile(filename: str) -> 'Iterator[None]':\n \"\"\" Profiles the wrapped code and stores the result in the profiles folder\n with the given filename.\n\n \"\"\"\n profiler = Profile()\n profiler.enable()\n\n yield\n\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))\n\n\n@contextmanager\ndef timing(name: str | None = None) -> 'Iterator[None]':\n \"\"\" Runs the wrapped code and prints the time in ms it took to run it.\n The name is printed in front of the time, if given.\n\n \"\"\"\n start = perf_counter()\n\n yield\n\n duration_ms = 1000.0 * (perf_counter() - start)\n\n if name:\n print(f'{name}: {duration_ms:.0f} ms')\n else:\n print(f'{duration_ms:.0f} ms')\n\n\n@lru_cache(maxsize=32)\ndef module_path_root(module: 'ModuleType | str') -> str:\n if isinstance(module, str):\n module = importlib.import_module(module)\n\n assert module is not None\n\n return os.path.dirname(inspect.getfile(module))\n\n\ndef module_path(module: 'ModuleType | str', subpath: str) -> str:\n \"\"\" Returns a subdirectory in the given python module.\n\n :mod:\n A python module (actual module or string)\n\n :subpath:\n Subpath below that python module. Leading slashes ('/') are ignored.\n \"\"\"\n\n parent = module_path_root(module)\n path = os.path.join(parent, subpath.strip('/'))\n\n # always be paranoid with path manipulation\n assert is_subpath(parent, path)\n\n return path\n\n\ndef touch(file_path: str) -> None:\n \"\"\" Touches the file on the given path. \"\"\"\n try:\n os.utime(file_path, None)\n except Exception:\n open(file_path, 'a').close()\n\n\nclass Bunch:\n \"\"\" A simple but handy \"collector of a bunch of named stuff\" class.\n\n See `<https://code.activestate.com/recipes/\\\n 52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.\n\n For example::\n\n point = Bunch(x=1, y=2)\n assert point.x == 1\n assert point.y == 2\n\n point.z = 3\n assert point.z == 3\n\n Allows the creation of simple nested bunches, for example::\n\n request = Bunch(**{'app.settings.org.my_setting': True})\n assert request.app.settings.org.my_setting is True\n\n \"\"\"\n def __init__(self, **kwargs: Any):\n self.__dict__.update(\n (key, value)\n for key, value in kwargs.items()\n if '.' not in key\n )\n for key, value in kwargs.items():\n if '.' 
in key:\n name, _, key = key.partition('.')\n setattr(self, name, Bunch(**{key: value}))\n\n if TYPE_CHECKING:\n # let mypy know that any attribute access could be valid\n def __getattr__(self, name: str) -> Any: ...\n def __setattr__(self, name: str, value: Any) -> None: ...\n def __delattr__(self, name: str) -> None: ...\n\n def __eq__(self, other: object) -> bool:\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n\ndef render_file(file_path: str, request: 'CoreRequest') -> 'Response':\n \"\"\" Takes the given file_path (content) and renders it to the browser.\n The file must exist on the local system and be readable by the current\n process.\n\n \"\"\"\n\n def hash_path(path: str) -> str:\n return hashlib.new( # nosec:B324\n 'sha1',\n path.encode('utf-8'),\n usedforsecurity=False\n ).hexdigest()\n\n # this is a very cachable result - though it's possible that a file\n # changes it's content type, it should usually not, especially since\n # we emphasize the use of random filenames\n @request.app.cache.cache_on_arguments(to_str=hash_path)\n def get_content_type(file_path: str) -> str:\n content_type = mimetypes.guess_type(file_path)[0]\n\n if not content_type:\n content_type = magic.from_file(file_path, mime=True)\n\n return content_type\n\n return request.get_response(\n static.FileApp(file_path, content_type=get_content_type(file_path)))\n\n\ndef hash_dictionary(dictionary: dict[str, Any]) -> str:\n \"\"\" Computes a sha256 hash for the given dictionary. The dictionary\n is expected to only contain values that can be serialized by json.\n\n That includes int, decimal, string, boolean.\n\n Note that this function is not meant to be used for hashing secrets. Do\n not include data in this dictionary that is secret!\n\n \"\"\"\n dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')\n return hashlib.new( # nosec:B324\n 'sha1',\n dict_as_string,\n usedforsecurity=False\n ).hexdigest()\n\n\n@overload\ndef groupbylist(\n iterable: Iterable[_T],\n key: None = ...\n) -> list[tuple[_T, list[_T]]]: ...\n\n\n@overload\ndef groupbylist(\n iterable: Iterable[_T],\n key: 'Callable[[_T], _KT]'\n) -> list[tuple[_KT, list[_T]]]: ...\n\n\ndef groupbylist(\n iterable: Iterable[_T],\n key: 'Callable[[_T], Any] | None' = None\n) -> list[tuple[Any, list[_T]]]:\n \"\"\" Works just like Python's ``itertools.groupby`` function, but instead\n of returning generators, it returns lists.\n\n \"\"\"\n return [(k, list(g)) for k, g in groupby(iterable, key=key)]\n\n\ndef linkify_phone(text: str) -> str:\n \"\"\" Takes a string and replaces valid phone numbers with html links. If a\n phone number is matched, it will be replaced by the result of a callback\n function, that does further checks on the regex match. 
If these checks do\n not pass, the matched number will remain unchanged.\n\n \"\"\"\n\n def strip_whitespace(number: str) -> str:\n return re.sub(r'\\s', '', number)\n\n def is_valid_length(number: str) -> bool:\n if number.startswith('+00'):\n return False\n if number.startswith('00'):\n return len(number) == 13\n elif number.startswith('0'):\n return len(number) == 10\n elif number.startswith('+'):\n return len(number) == 12\n return False\n\n def handle_match(match: 'Match[str]') -> str:\n inside_html = match.group(1)\n number = f'{match.group(2)}{match.group(3)}'\n assert not number.endswith('\\n')\n if inside_html:\n return match.group(0)\n if is_valid_length(strip_whitespace(number)):\n number = remove_repeated_spaces(number).strip()\n return f'<a href=\"tel:{number}\">{number}</a> '\n\n return match.group(0)\n\n return _phone_ch_html_safe.sub(handle_match, text)\n\n\n# FIXME: A lot of these methods should be using MarkupSafe\ndef linkify(text: str, escape: bool = True) -> str:\n \"\"\" Takes plain text and injects html links for urls and email addresses.\n\n By default the text is html escaped before it is linkified. This accounts\n for the fact that we usually use this for text blocks that we mean to\n extend with email addresses and urls.\n\n If html is already possible, why linkify it?\n\n Note: We need to clean the html after we've created it (linkify\n parses escaped html and turns it into real html). As a consequence it\n is possible to have html urls in the text that won't be escaped.\n\n \"\"\"\n\n if not text:\n return text\n\n long_top_level_domains = ['.agency']\n\n # bleach.linkify supports only a fairly limited amount of tlds\n if any(domain in text for domain in long_top_level_domains):\n if '@' in text:\n linkified = str(\n Markup('<a href=\"mailto:{text}\">{text}</a>').format(\n text=text\n )\n )\n else:\n linkified = str(\n Markup('<a href=\"{text}\">{text}</a>').format(text=text)\n )\n else:\n linkified = linkify_phone(bleach.linkify(text, parse_email=True))\n\n if not escape:\n return linkified\n\n return bleach.clean(\n linkified,\n tags=['a'],\n attributes={'a': ['href', 'rel']},\n protocols=['http', 'https', 'mailto', 'tel']\n )\n\n\ndef paragraphify(text: str) -> str:\n \"\"\" Takes a text with newlines groups them into paragraphs according to the\n following rules:\n\n If there's a single newline between two lines, a <br> will replace that\n newline.\n\n If there are multiple newlines between two lines, each line will become\n a paragraph and the extra newlines are discarded.\n\n \"\"\"\n text = text and text.replace('\\r', '').strip('\\n')\n\n if not text:\n return ''\n\n return ''.join(f'<p>{p}</p>' for p in (\n p.replace('\\n', '<br>') for p in _multiple_newlines.split(text)\n ))\n\n\ndef to_html_ul(\n value: str,\n convert_dashes: bool = True,\n with_title: bool = False\n) -> str:\n \"\"\" Linkify and convert to text to one or multiple ul's or paragraphs.\n \"\"\"\n if not value:\n return ''\n\n value = value.replace('\\r', '').strip('\\n')\n value = value.replace('\\n\\n', '\\n \\n')\n\n if not convert_dashes:\n return '<p>{}</p>'.format(\n '<br>'.join(linkify(value).splitlines())\n )\n\n elements = []\n temp: list[str] = []\n\n def ul(inner: str) -> str:\n return f'<ul class=\"bulleted\">{inner}</ul>'\n\n def li(inner: str) -> str:\n return f'<li>{inner}</li>'\n\n def p(inner: str) -> str:\n return f'<p>{inner}</p>'\n\n was_list = False\n\n for i, line in enumerate(value.splitlines()):\n if not line:\n continue\n\n line = linkify(line)\n is_list = 
line.startswith('-')\n new_p_or_ul = True if line == ' ' else False\n\n line = line.lstrip('-').strip()\n\n if with_title:\n elements.append(p(f'<span class=\"title\">{line}</span>'))\n with_title = False\n else:\n if new_p_or_ul or (was_list != is_list and i > 0):\n elements.append(\n ul(''.join(temp)) if was_list else p('<br>'.join(temp))\n )\n temp = []\n was_list = False\n\n if not new_p_or_ul:\n temp.append((li(line) if is_list else line))\n\n new_p_or_ul = False\n was_list = is_list\n\n if temp:\n elements.append(\n ul(''.join(temp)) if was_list else p('<br>'.join(temp))\n )\n\n return ''.join(elements)\n\n\ndef ensure_scheme(url: str, default: str = 'http') -> str:\n \"\"\" Makes sure that the given url has a scheme in front, if none\n was provided.\n\n \"\"\"\n\n if not url:\n return url\n\n # purl (or to be precise urlparse) will parse empty host names ('abc.xyz')\n # wrongly, assuming the abc.xyz is a path. by adding a double slash if\n # there isn't one already, we can circumvent that problem\n if '//' not in url:\n url = '//' + url\n\n _url = URL(url)\n\n if _url.scheme():\n return url\n\n return _url.scheme(default).as_string()\n\n\ndef is_uuid(value: str | UUID) -> bool:\n \"\"\" Returns true if the given value is a uuid. The value may be a string\n or of type UUID. If it's a string, the uuid is checked with a regex.\n \"\"\"\n if isinstance(value, str):\n return _uuid.match(str(value)) and True or False\n\n return isinstance(value, UUID)\n\n\ndef is_non_string_iterable(obj: object) -> bool:\n \"\"\" Returns true if the given obj is an iterable, but not a string. \"\"\"\n return not (isinstance(obj, str) or isinstance(obj, bytes))\\\n and isinstance(obj, Iterable)\n\n\ndef relative_url(absolute_url: str | None) -> str:\n \"\"\" Removes everything in front of the path, including scheme, host,\n username, password and port.\n\n \"\"\"\n url = URL._mutate(\n URL(absolute_url),\n scheme=None,\n username=None,\n password=None,\n host=None,\n port=None\n )\n\n return url.as_string()\n\n\ndef is_subpath(directory: str, path: str) -> bool:\n \"\"\" Returns true if the given path is inside the given directory. \"\"\"\n directory = os.path.join(os.path.realpath(directory), '')\n path = os.path.realpath(path)\n\n # return true, if the common prefix of both is equal to directory\n # e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b\n return os.path.commonprefix([path, directory]) == directory\n\n\n@overload\ndef is_sorted(\n iterable: 'Iterable[SupportsRichComparison]',\n key: 'Callable[[SupportsRichComparison], SupportsRichComparison]' = ...,\n reverse: bool = ...\n) -> bool: ...\n\n\n@overload\ndef is_sorted(\n iterable: 'Iterable[_T]',\n key: 'Callable[[_T], SupportsRichComparison]',\n reverse: bool = ...\n) -> bool: ...\n\n\n# FIXME: Do we really want to allow any Iterable? This seems like a bad\n# idea to me... Iterators will be consumed and the Iterable might\n# be infinite. This seems like it should be a Container instead,\n# then we also don't need to use tee or list to make a copy\ndef is_sorted(\n iterable: 'Iterable[Any]',\n key: 'Callable[[Any], SupportsRichComparison]' = lambda i: i,\n reverse: bool = False\n) -> bool:\n \"\"\" Returns True if the iterable is sorted. 
\"\"\"\n\n # NOTE: we previously used `tee` here, but since `sorted` consumes\n # the entire iterator, this is the exact case where tee is\n # slower than just pulling the entire sequence into a list\n seq = list(iterable)\n\n for a, b in zip(seq, sorted(seq, key=key, reverse=reverse)):\n if a is not b:\n return False\n\n return True\n\n\ndef morepath_modules(cls: type[morepath.App]) -> 'Iterator[str]':\n \"\"\" Returns all morepath modules which should be scanned for the given\n morepath application class.\n\n We can't reliably know the actual morepath modules that\n need to be scanned, which is why we assume that each module has\n one namespace (like 'more.transaction' or 'onegov.core').\n\n \"\"\"\n for base in cls.__mro__:\n if not issubclass(base, morepath.App):\n continue\n\n if base is morepath.App:\n continue\n\n module = '.'.join(base.__module__.split('.')[:2])\n\n if module.startswith('test'):\n continue\n\n yield module\n\n\ndef scan_morepath_modules(cls: type[morepath.App]) -> None:\n \"\"\" Tries to scan all the morepath modules required for the given\n application class. This is not guaranteed to stay reliable as there is\n no sure way to discover all modules required by the application class.\n\n \"\"\"\n for module in sorted(morepath_modules(cls)):\n morepath.scan(import_module(module))\n\n\ndef get_unique_hstore_keys(\n session: 'Session',\n column: 'Column[dict[str, Any]]'\n) -> set[str]:\n \"\"\" Returns a set of keys found in an hstore column over all records\n of its table.\n\n \"\"\"\n\n base = session.query(column.keys()).with_entities( # type:ignore\n sqlalchemy.func.skeys(column).label('keys'))\n\n query = sqlalchemy.select(\n [sqlalchemy.func.array_agg(sqlalchemy.column('keys'))],\n distinct=True\n ).select_from(base.subquery())\n\n keys = session.execute(query).scalar()\n return set(keys) if keys else set()\n\n\ndef makeopendir(fs: 'FS', directory: str) -> 'SubFS[FS]':\n \"\"\" Creates and opens the given directory in the given PyFilesystem. \"\"\"\n\n if not fs.isdir(directory):\n fs.makedir(directory)\n\n return fs.opendir(directory)\n\n\ndef append_query_param(url: str, key: str, value: str) -> str:\n \"\"\" Appends a single query parameter to an url. This is faster than\n using Purl, if and only if we only add one query param.\n\n Also this function assumes that the value is already url encoded.\n\n \"\"\"\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)\n\n\nclass PostThread(Thread):\n\n \"\"\" POSTs the given data with the headers to the URL.\n\n Example::\n\n data = {'a': 1, 'b': 2}\n data = json.dumps(data).encode('utf-8')\n PostThread(\n 'https://example.com/post',\n data,\n (\n ('Content-Type', 'application/json; charset=utf-8'),\n ('Content-Length', len(data))\n )\n ).start()\n\n This only works for external URLs! 
If posting to server itself is\n needed, use a process instead of the thread!\n\n \"\"\"\n\n def __init__(\n self,\n url: str,\n data: bytes,\n headers: 'Collection[tuple[str, str]]',\n timeout: float = 30\n ):\n Thread.__init__(self)\n self.url = url\n self.data = data\n self.headers = headers\n self.timeout = timeout\n\n def run(self) -> None:\n try:\n # Validate URL protocol before opening it, since it's possible to\n # open ftp:// and file:// as well.\n if not self.url.lower().startswith('http'):\n raise ValueError from None\n\n request = urllib.request.Request(self.url)\n for header in self.headers:\n request.add_header(header[0], header[1])\n urllib.request.urlopen( # nosec B310\n request, self.data, self.timeout\n )\n except Exception as e:\n log.error(\n 'Error while sending a POST request to {}: {}'.format(\n self.url, str(e)\n )\n )\n\n\ndef toggle(collection: set[_T], item: _T | None) -> set[_T]:\n \"\"\" Returns a new set where the item has been toggled. \"\"\"\n\n if item is None:\n return collection\n\n if item in collection:\n return collection - {item}\n else:\n return collection | {item}\n\n\ndef binary_to_dictionary(\n binary: bytes,\n filename: str | None = None\n) -> 'FileDict':\n \"\"\" Takes raw binary filedata and stores it in a dictionary together\n with metadata information.\n\n The data is compressed before it is stored int he dictionary. Use\n :func:`dictionary_to_binary` to get the original binary data back.\n\n \"\"\"\n\n assert isinstance(binary, bytes)\n\n mimetype = magic.from_buffer(binary, mime=True)\n\n # according to https://tools.ietf.org/html/rfc7111, text/csv should be used\n if mimetype == 'application/csv':\n mimetype = 'text/csv'\n\n gzipdata = BytesIO()\n\n with gzip.GzipFile(fileobj=gzipdata, mode='wb') as f:\n f.write(binary)\n\n return {\n 'data': base64.b64encode(gzipdata.getvalue()).decode('ascii'),\n 'filename': filename,\n 'mimetype': mimetype,\n 'size': len(binary)\n }\n\n\ndef dictionary_to_binary(dictionary: 'LaxFileDict') -> bytes:\n \"\"\" Takes a dictionary created by :func:`binary_to_dictionary` and returns\n the original binary data.\n\n \"\"\"\n data = base64.b64decode(dictionary['data'])\n\n with gzip.GzipFile(fileobj=BytesIO(data), mode='r') as f:\n return f.read()\n\n\n@overload\ndef safe_format(\n format: str,\n dictionary: dict[str, str | int | float],\n types: None = ...,\n adapt: 'Callable[[str], str] | None' = ...,\n raise_on_missing: bool = ...\n) -> str: ...\n\n\n@overload\ndef safe_format(\n format: str,\n dictionary: dict[str, _T],\n types: set[type[_T]] = ...,\n adapt: 'Callable[[str], str] | None' = ...,\n raise_on_missing: bool = ...\n) -> str: ...\n\n\ndef safe_format(\n format: str,\n dictionary: dict[str, Any],\n types: set[type[Any]] | None = None,\n adapt: 'Callable[[str], str] | None' = None,\n raise_on_missing: bool = False\n) -> str:\n \"\"\" Takes a user-supplied string with format blocks and returns a string\n where those blocks are replaced by values in a dictionary.\n\n For example::\n\n >>> safe_format('[user] has logged in', {'user': 'admin'})\n 'admin has logged in'\n\n :param format:\n The format to use. Square brackets denote dictionary keys. To\n literally print square bracktes, mask them by doubling ('[[' -> '[')\n\n :param dictionary:\n The dictionary holding the variables to use. If the key is not found\n in the dictionary, the bracket is replaced with an empty string.\n\n :param types:\n A set of types supported by the dictionary. 
Limiting this to safe\n types like builtins (str, int, float) ensure that no values are\n accidentally leaked through faulty __str__ representations.\n\n Note that inheritance is ignored. Supported types need to be\n whitelisted explicitly.\n\n :param adapt:\n An optional callable that receives the key before it is used. Returns\n the same key or an altered version.\n\n :param raise_on_missing:\n True if missing keys should result in a runtime error (defaults to\n False).\n\n This is strictly meant for formats provided by users. Python's string\n formatting options are clearly superior to this, however it is less\n secure!\n\n \"\"\"\n\n types = types or {int, str, float}\n output = StringIO()\n buffer = StringIO()\n opened = 0\n\n for ix, char in enumerate(format):\n if char == '[':\n opened += 1\n\n if char == ']':\n opened -= 1\n\n if opened == 1 and char != '[' and char != ']':\n print(char, file=buffer, end='')\n continue\n\n if opened == 2 or opened == -2:\n if buffer.tell():\n raise RuntimeError(\"Unexpected bracket inside bracket found\")\n\n print(char, file=output, end='')\n opened = 0\n continue\n\n if buffer.tell():\n k = adapt(buffer.getvalue()) if adapt else buffer.getvalue()\n\n if raise_on_missing and k not in dictionary:\n raise RuntimeError(\"Key '{}' is unknown\".format(k))\n\n v = dictionary.get(k, '')\n t = type(v)\n\n if t not in types:\n raise RuntimeError(\"Invalid type for '{}': {}\".format(k, t))\n\n print(v, file=output, end='')\n buffer = StringIO()\n\n if char != '[' and char != ']':\n print(char, file=output, end='')\n\n if opened != 0:\n raise RuntimeError(\"Uneven number of brackets in '{}'\".format(format))\n\n return output.getvalue()\n\n\ndef safe_format_keys(\n format: str,\n adapt: 'Callable[[str], str] | None' = None\n) -> list[str]:\n \"\"\" Takes a :func:`safe_format` string and returns the found keys. \"\"\"\n\n keys = []\n\n def adapt_and_record(key: str) -> str:\n key = adapt(key) if adapt else key\n keys.append(key)\n\n return key\n\n safe_format(format, {}, adapt=adapt_and_record)\n\n return keys\n\n\ndef is_valid_yubikey(\n client_id: str,\n secret_key: str,\n expected_yubikey_id: str,\n yubikey: str\n) -> bool:\n \"\"\" Asks the yubico validation servers if the given yubikey OTP is valid.\n\n :client_id:\n The yubico API client id.\n\n :secret_key:\n The yubico API secret key.\n\n :expected_yubikey_id:\n The expected yubikey id. The yubikey id is defined as the first twelve\n characters of any yubikey value. Each user should have a yubikey\n associated with it's account. If the yubikey value comes from a\n different key, the key is invalid.\n\n :yubikey:\n The actual yubikey value that should be verified.\n\n :return: True if yubico confirmed the validity of the key.\n\n \"\"\"\n assert client_id and secret_key and expected_yubikey_id and yubikey\n assert len(expected_yubikey_id) == 12\n\n # if the yubikey doesn't start with the expected yubikey id we do not\n # need to make a roundtrip to the validation server\n if not yubikey.startswith(expected_yubikey_id):\n # FIXME: Are we leaking information with this early out?\n return False\n\n try:\n return Yubico(client_id, secret_key).verify(yubikey)\n except StatusCodeError as e:\n if e.status_code != 'REPLAYED_OTP':\n raise e\n\n return False\n except SignatureVerificationError:\n return False\n\n\ndef is_valid_yubikey_format(otp: str) -> bool:\n \"\"\" Returns True if the given OTP has the correct format. 
Does not actually\n contact Yubico, so this function may return true, for some invalid keys.\n\n \"\"\"\n\n return ALPHABET_RE.match(otp) and True or False\n\n\ndef yubikey_otp_to_serial(otp: str) -> int | None:\n \"\"\" Takes a Yubikey OTP and calculates the serial number of the key.\n\n The serial key is printed on the yubikey, in decimal and as a QR code.\n\n Example:\n\n >>> yubikey_otp_to_serial(\n 'ccccccdefghdefghdefghdefghdefghdefghdefghklv')\n 2311522\n\n Adapted from Java:\n\n https://github.com/Yubico/yubikey-salesforce-client/blob/\n e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls\n\n If the key cannot be calculated, None is returned. This can happen if\n they key is malformed.\n\n \"\"\"\n\n if not is_valid_yubikey_format(otp):\n return None\n\n token = 'cccc' + otp[:12]\n\n toggle = False\n keep = 0\n\n bytesarray = []\n\n for char in token:\n n = ALPHABET.index(char)\n\n toggle = not toggle\n\n if toggle:\n keep = n\n else:\n bytesarray.append((keep << 4) | n)\n\n value = 0\n\n # in Java, shifts on integers are masked with 0x1f using AND\n # https://docs.oracle.com/javase/specs/jls/se8/html/jls-15.html#jls-15.19\n mask_value = 0x1f\n\n for i in range(0, 8):\n shift = (4 - 1 - i) * 8\n value += (bytesarray[i] & 255) << (shift & mask_value)\n\n return value\n\n\ndef yubikey_public_id(otp: str) -> str:\n \"\"\" Returns the yubikey identity given a token. \"\"\"\n\n return otp[:12]\n\n\ndef dict_path(dictionary: dict[str, _T], path: str) -> _T:\n \"\"\" Gets the value of the given dictionary at the given path. For example:\n\n >>> data = {'foo': {'bar': True}}\n >>> dict_path(data, 'foo.bar')\n True\n\n \"\"\"\n\n if not dictionary:\n raise KeyError()\n\n return reduce(operator.getitem, path.split('.'), dictionary) # type:ignore\n\n\ndef safe_move(src: str, dst: str) -> None:\n \"\"\" Rename a file from ``src`` to ``dst``.\n\n * Moves must be atomic. ``shutil.move()`` is not atomic.\n\n * Moves must work across filesystems. Often temp directories and the\n cache directories live on different filesystems. ``os.rename()`` can\n throw errors if run across filesystems.\n\n So we try ``os.rename()``, but if we detect a cross-filesystem copy, we\n switch to ``shutil.move()`` with some wrappers to make it atomic.\n\n Via https://alexwlchan.net/2019/03/atomic-cross-filesystem-moves-in-python\n\n \"\"\"\n try:\n os.rename(src, dst)\n except OSError as err:\n\n if err.errno == errno.EXDEV:\n # Generate a unique ID, and copy `<src>` to the target directory\n # with a temporary name `<dst>.<ID>.tmp`. Because we're copying\n # across a filesystem boundary, this initial copy may not be\n # atomic. We intersperse a random UUID so if different processes\n # are copying into `<dst>`, they don't overlap in their tmp copies.\n copy_id = uuid4()\n tmp_dst = \"%s.%s.tmp\" % (dst, copy_id)\n shutil.copyfile(src, tmp_dst)\n\n # Then do an atomic rename onto the new name, and clean up the\n # source image.\n os.rename(tmp_dst, dst)\n os.unlink(src)\n else:\n raise\n\n\n@overload\ndef batched(\n iterable: Iterable[_T],\n batch_size: int,\n container_factory: 'type[tuple]' = ... 
# type:ignore[type-arg]\n) -> 'Iterator[tuple[_T, ...]]': ...\n\n\n@overload\ndef batched(\n iterable: Iterable[_T],\n batch_size: int,\n container_factory: 'type[list]' # type:ignore[type-arg]\n) -> 'Iterator[list[_T]]': ...\n\n\n# NOTE: If there were higher order TypeVars, we could properly infer\n# the type of the Container, for now we just add overloads for\n# two of the most common container_factories\n@overload\ndef batched(\n iterable: Iterable[_T],\n batch_size: int,\n container_factory: 'Callable[[Iterator[_T]], Collection[_T]]'\n) -> 'Iterator[Collection[_T]]': ...\n\n\ndef batched(\n iterable: Iterable[_T],\n batch_size: int,\n container_factory: 'Callable[[Iterator[_T]], Collection[_T]]' = tuple\n) -> 'Iterator[Collection[_T]]':\n \"\"\" Splits an iterable into batches of batch_size and puts them\n inside a given collection (tuple by default).\n\n The container_factory is necessary in order to consume the iterator\n returned by islice. Otherwise this function would never return.\n\n \"\"\"\n\n iterator = iter(iterable)\n while True:\n batch = container_factory(islice(iterator, batch_size))\n if len(batch) == 0:\n return\n\n yield batch\n",
"step-ids": [
35,
38,
46,
49,
61
]
}
|
[
35,
38,
46,
49,
61
] |
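A quick standalone check of the batched helper that closes the record above: a minimal sketch duplicating its logic (same container_factory default and empty-batch stop condition) so it runs without the rest of the module.

# Minimal standalone sketch of batched() from the record above: islice pulls
# batch_size items, container_factory consumes that slice, and an empty batch
# signals exhaustion.
from itertools import islice

def batched(iterable, batch_size, container_factory=tuple):
    iterator = iter(iterable)
    while True:
        batch = container_factory(islice(iterator, batch_size))
        if len(batch) == 0:
            return
        yield batch

assert list(batched(range(7), 3)) == [(0, 1, 2), (3, 4, 5), (6,)]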
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.DEBUG, format=
'%(asctime)s - %(levelname)s - %(message)s')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.DEBUG, format=
'%(asctime)s - %(levelname)s - %(message)s')
q = 'levamisole inhibitor'
p = PaperProcessor(q)
<|reserved_special_token_1|>
from paper_processor import PaperProcessor
import logging
logging.basicConfig(level=logging.DEBUG, format=
'%(asctime)s - %(levelname)s - %(message)s')
q = 'levamisole inhibitor'
p = PaperProcessor(q)
<|reserved_special_token_1|>
from paper_processor import PaperProcessor
import logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s')
q = "levamisole inhibitor"
p = PaperProcessor(q)
|
flexible
|
{
"blob_id": "96e64b715dbfc1c59ba44d608ad2694b165017b5",
"index": 1975,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n<mask token>\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(message)s')\nq = 'levamisole inhibitor'\np = PaperProcessor(q)\n",
"step-4": "from paper_processor import PaperProcessor\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(message)s')\nq = 'levamisole inhibitor'\np = PaperProcessor(q)\n",
"step-5": "from paper_processor import PaperProcessor\nimport logging\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(levelname)s - %(message)s')\nq = \"levamisole inhibitor\"\np = PaperProcessor(q)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
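For reference, the basicConfig call in this record makes DEBUG output timestamped and level-tagged; a standalone sketch (PaperProcessor is a local module and is not reproduced here).

# Standalone sketch of the logging setup above; records print as
# "<timestamp> - DEBUG - <message>".
import logging

logging.basicConfig(level=logging.DEBUG, format=
    '%(asctime)s - %(levelname)s - %(message)s')
logging.debug('processing query: %s', 'levamisole inhibitor')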
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_classes(html):
"""
returns a list of classes and titles, parsing through 'html'
"""
<|reserved_special_token_1|>
import sys
from bs4 import BeautifulSoup
def get_classes(html):
"""
returns a list of classes and titles, parsing through 'html'
"""
<|reserved_special_token_1|>
import sys
from bs4 import BeautifulSoup
def get_classes(html):
"""
returns a list of classes and titles, parsing through 'html'
"""
# elements = html.find_all("span", "code")
# titles = html.find_all("span", "title")
# classes = []
# for i in range(len(elements)):
# item = elements[i]
# tit = titles[i]
# classes += [(item.text.replace('\xa0', ' '), tit.text.replace('\xa0', ' '))]
# return classes
|
flexible
|
{
"blob_id": "9bb8e0f732eac474dbc01c374f9c74178f65dc36",
"index": 3063,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_classes(html):\n \"\"\"\n returns a list of classes and titles, parsing through 'html'\n \"\"\"\n",
"step-3": "import sys\nfrom bs4 import BeautifulSoup\n\n\ndef get_classes(html):\n \"\"\"\n returns a list of classes and titles, parsing through 'html'\n \"\"\"\n",
"step-4": "import sys\nfrom bs4 import BeautifulSoup\n\n\ndef get_classes(html):\n \"\"\"\n returns a list of classes and titles, parsing through 'html'\n \"\"\"\n # elements = html.find_all(\"span\", \"code\")\n # titles = html.find_all(\"span\", \"title\")\n # classes = []\n # for i in range(len(elements)):\n # item = elements[i]\n # tit = titles[i]\n # classes += [(item.text.replace('\\xa0', ' '), tit.text.replace('\\xa0', ' '))]\n # return classes\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
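The fullest step in this record leaves the function body commented out, so get_classes returns None despite its docstring. A hedged completion reconstructed only from those comments (the span class names "code" and "title" and the '\xa0' cleanup are taken from them).

# Hedged completion of get_classes based on the commented-out body above:
# pair <span class="code"> with <span class="title"> and normalize
# non-breaking spaces.
from bs4 import BeautifulSoup

def get_classes(html):
    """
    returns a list of (code, title) tuples, parsing through 'html'
    """
    elements = html.find_all("span", "code")
    titles = html.find_all("span", "title")
    return [(e.text.replace('\xa0', ' '), t.text.replace('\xa0', ' '))
            for e, t in zip(elements, titles)]

soup = BeautifulSoup('<span class="code">CS\xa0101</span>'
                     '<span class="title">Intro</span>', 'html.parser')
assert get_classes(soup) == [('CS 101', 'Intro')]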
from Global import *
import ShuntingYard
from Thompson import *
def check_string(automaton, word):
	inicial = automaton['s'].closure
for i in word:
inicial = state_list_delta(inicial, i)
return automaton['f'] in inicial
def create_AFND(re):
deltas = []
initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))
s = State('s')
f = State('f')
automaton = {s.name: s, f.name: f}
#automaton = {s.name: s}
	s.add_transition(initial_node, f)
deltas.append((s,initial_node))
while len(deltas) > 0:
(origin, simbol) = deltas.pop()
if not origin in automaton.values():
automaton.setdefault(origin.name, origin)
if isinstance(simbol, ShuntingYard.Node):
aux_deltas = Thompson.generic(origin, simbol)
for t in aux_deltas:
deltas.insert(0, t)
for state_name in automaton:
automaton[state_name].update_closure()
return automaton
|
normal
|
{
"blob_id": "9cf0174a8bd2bccbd8e5d0be1f0b031a1a23c9df",
"index": 4691,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_string(automaton, word):\n inicial = automata['s'].closure\n for i in word:\n inicial = state_list_delta(inicial, i)\n return automaton['f'] in inicial\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_string(automaton, word):\n inicial = automata['s'].closure\n for i in word:\n inicial = state_list_delta(inicial, i)\n return automaton['f'] in inicial\n\n\ndef create_AFND(re):\n deltas = []\n initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))\n s = State('s')\n f = State('f')\n automaton = {s.name: s, f.name: f}\n s.add_transition(initial_node, f)\n deltas.append((s, initial_node))\n while len(deltas) > 0:\n origin, simbol = deltas.pop()\n if not origin in automaton.values():\n automaton.setdefault(origin.name, origin)\n if isinstance(simbol, ShuntingYard.Node):\n aux_deltas = Thompson.generic(origin, simbol)\n for t in aux_deltas:\n deltas.insert(0, t)\n for state_name in automaton:\n automaton[state_name].update_closure()\n return automaton\n",
"step-4": "from Global import *\nimport ShuntingYard\nfrom Thompson import *\n\n\ndef check_string(automaton, word):\n inicial = automata['s'].closure\n for i in word:\n inicial = state_list_delta(inicial, i)\n return automaton['f'] in inicial\n\n\ndef create_AFND(re):\n deltas = []\n initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))\n s = State('s')\n f = State('f')\n automaton = {s.name: s, f.name: f}\n s.add_transition(initial_node, f)\n deltas.append((s, initial_node))\n while len(deltas) > 0:\n origin, simbol = deltas.pop()\n if not origin in automaton.values():\n automaton.setdefault(origin.name, origin)\n if isinstance(simbol, ShuntingYard.Node):\n aux_deltas = Thompson.generic(origin, simbol)\n for t in aux_deltas:\n deltas.insert(0, t)\n for state_name in automaton:\n automaton[state_name].update_closure()\n return automaton\n",
"step-5": "from Global import *\nimport ShuntingYard\nfrom Thompson import *\n\ndef check_string(automaton, word):\n\tinicial = automata['s'].closure\n\tfor i in word:\n\t\tinicial = state_list_delta(inicial, i)\n\treturn automaton['f'] in inicial\n\ndef create_AFND(re):\n\tdeltas = []\n\n\tinitial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))\n\n\ts = State('s')\n\tf = State('f')\n\tautomaton = {s.name: s, f.name: f}\n\t#automaton = {s.name: s}\n\n\ts.add_transition(initial_node, f);\n\tdeltas.append((s,initial_node))\n\n\twhile len(deltas) > 0:\n\t\t(origin, simbol) = deltas.pop()\n\t\t\n\t\tif not origin in automaton.values():\n\t\t\tautomaton.setdefault(origin.name, origin)\n\n\t\tif isinstance(simbol, ShuntingYard.Node):\n\t\t\taux_deltas = Thompson.generic(origin, simbol)\n\t\t\tfor t in aux_deltas:\n\t\t\t\tdeltas.insert(0, t)\n\n\tfor state_name in automaton:\n\t\tautomaton[state_name].update_closure()\n\n\treturn automaton\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
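The closing update_closure pass in create_AFND presumably computes epsilon-closures over the State graph (State and state_list_delta come from the Global/Thompson modules and are not shown here). The closure step itself, in a self-contained form:

# Self-contained sketch of the epsilon-closure computation that
# State.update_closure above presumably performs: every state reachable
# through epsilon edges alone.
def epsilon_closure(state, eps_edges):
    closure, stack = {state}, [state]
    while stack:
        for nxt in eps_edges.get(stack.pop(), ()):
            if nxt not in closure:
                closure.add(nxt)
                stack.append(nxt)
    return closure

assert epsilon_closure('s', {'s': ['a'], 'a': ['f']}) == {'s', 'a', 'f'}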
"""script for subpixel experiment (not tested)
"""
import numpy as np
from tqdm import tqdm
import logging
from pathlib import Path
import paddle
import paddle.optimizer
import paddle.io
from utils.loader import dataLoader
from utils.loader import modelLoader
from utils.loader import pretrainedLoader
from utils.tools import dict_update
from utils.utils import labels2Dto3D
from utils.utils import flattenDetection
from utils.utils import labels2Dto3D_flattened
from utils.utils import pltImshow
from utils.utils import saveImg
from utils.utils import precisionRecall_torch
from utils.utils import save_checkpoint
@paddle.no_grad()
class Val_model_subpixel(object):
def __init__(self, config, device='gpu', verbose=False):
self.config = config
self.model = self.config['name']
self.params = self.config['params']
self.weights_path = self.config['pretrained']
self.device = device
pass
def loadModel(self):
from utils.loader import modelLoader
self.net = modelLoader(model=self.model, **self.params)
checkpoint = paddle.load(self.weights_path)
self.net.load_dict(checkpoint['model_state_dict'])
self.net = self.net.to(self.device)
logging.info('successfully load pretrained model from: %s',
self.weights_path)
pass
def extract_patches(self, label_idx, img):
from utils.losses import extract_patches
patch_size = self.config['params']['patch_size']
patches = extract_patches(label_idx.to(self.device),
img.to(self.device),
patch_size=patch_size)
return patches
pass
def run(self, patches):
with paddle.no_grad():
pred_res = self.net(patches)
return pred_res
pass
if __name__ == '__main__':
filename = 'configs/magicpoint_repeatability.yaml'
import yaml
device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'
device = device.replace('cuda', 'gpu')
device = paddle.set_device(device)
paddle.set_default_dtype('float32')
with open(filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
task = config['data']['dataset']
from utils.loader import dataLoader_test as dataLoader
data = dataLoader(config, dataset='hpatches')
test_set, test_loader = data['test_set'], data['test_loader']
for i, sample in tqdm(enumerate(test_loader)):
if i > 1:
break
val_agent = Val_model_subpixel(config['subpixel'], device=device)
val_agent.loadModel()
img = sample['image']
print('image: ', img.shape)
points = paddle.to_tensor([[1, 2], [3, 4]])
def points_to_4d(points):
num_of_points = points.shape[0]
        cols = paddle.zeros([num_of_points, 1], dtype=paddle.float32)
points = paddle.concat((cols, cols, paddle.to_tensor(points, dtype=paddle.float32)), axis=1)
return points
label_idx = points_to_4d(points)
patches = val_agent.extract_patches(label_idx, img)
points_res = val_agent.run(patches)
|
normal
|
{
"blob_id": "fc89fdf17f887ea398be5b36d4d6f0444d64b3e0",
"index": 8026,
"step-1": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n <mask token>\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n paddle.set_default_dtype('float32')\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n task = config['data']['dataset']\n from utils.loader import dataLoader_test as dataLoader\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).\n requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points,\n dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nfrom pathlib import Path\nimport paddle\nimport paddle.optimizer\nimport paddle.io\nfrom utils.loader import dataLoader\nfrom utils.loader import modelLoader\nfrom utils.loader import pretrainedLoader\nfrom utils.tools import dict_update\nfrom utils.utils import labels2Dto3D\nfrom utils.utils import flattenDetection\nfrom utils.utils import labels2Dto3D_flattened\nfrom utils.utils import pltImshow\nfrom utils.utils import saveImg\nfrom utils.utils import precisionRecall_torch\nfrom utils.utils import save_checkpoint\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s', self.\n weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device), img.to(self.\n device), patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n paddle.set_default_dtype('float32')\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n task = config['data']['dataset']\n from utils.loader import dataLoader_test as dataLoader\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).\n requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points,\n dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-5": "\"\"\"script for subpixel experiment (not tested)\n\"\"\"\nimport numpy as np\nfrom tqdm import tqdm\nimport logging\nfrom pathlib import Path\n\nimport paddle\nimport paddle.optimizer\nimport paddle.io\n\nfrom utils.loader import dataLoader\nfrom utils.loader import modelLoader\nfrom utils.loader import pretrainedLoader\nfrom utils.tools import dict_update\nfrom utils.utils import labels2Dto3D\nfrom utils.utils import flattenDetection\nfrom utils.utils import labels2Dto3D_flattened\nfrom utils.utils import pltImshow\nfrom utils.utils import saveImg\nfrom utils.utils import precisionRecall_torch\nfrom utils.utils import save_checkpoint\n\n\[email protected]_grad()\nclass Val_model_subpixel(object):\n\n def __init__(self, config, device='gpu', verbose=False):\n self.config = config\n self.model = self.config['name']\n self.params = self.config['params']\n self.weights_path = self.config['pretrained']\n self.device = device\n pass\n\n def loadModel(self):\n from utils.loader import modelLoader\n self.net = modelLoader(model=self.model, **self.params)\n\n checkpoint = paddle.load(self.weights_path)\n self.net.load_dict(checkpoint['model_state_dict'])\n\n self.net = self.net.to(self.device)\n logging.info('successfully load pretrained model from: %s',\n self.weights_path)\n pass\n\n def extract_patches(self, label_idx, img):\n from utils.losses import extract_patches\n patch_size = self.config['params']['patch_size']\n patches = extract_patches(label_idx.to(self.device),\n img.to(self.device),\n patch_size=patch_size)\n return patches\n pass\n\n def run(self, patches):\n with paddle.no_grad():\n pred_res = self.net(patches)\n return pred_res\n pass\n\n\nif __name__ == '__main__':\n filename = 'configs/magicpoint_repeatability.yaml'\n import yaml\n\n device = 'cuda' if paddle.is_compiled_with_cuda() else 'cpu'\n device = device.replace('cuda', 'gpu')\n device = paddle.set_device(device)\n\n paddle.set_default_dtype('float32')\n\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n task = config['data']['dataset']\n\n from utils.loader import dataLoader_test as dataLoader\n\n data = dataLoader(config, dataset='hpatches')\n test_set, test_loader = data['test_set'], data['test_loader']\n for i, sample in tqdm(enumerate(test_loader)):\n if i > 1:\n break\n\n val_agent = Val_model_subpixel(config['subpixel'], device=device)\n val_agent.loadModel()\n\n img = sample['image']\n print('image: ', img.shape)\n points = paddle.to_tensor([[1, 2], [3, 4]])\n\n def points_to_4d(points):\n num_of_points = points.shape[0]\n cols = paddle.to_tensor(paddle.zeros([num_of_points, 1]).requires_grad_(False), dtype=paddle.float32)\n points = paddle.concat((cols, cols, paddle.to_tensor(points, dtype=paddle.float32)), axis=1)\n return points\n label_idx = points_to_4d(points)\n\n patches = val_agent.extract_patches(label_idx, img)\n points_res = val_agent.run(patches)\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
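The points_to_4d helper in this record depends only on paddle, so its row layout is easy to verify standalone; the (0, 0, y, x)-per-row reading is inferred from the concat order and is an assumption.

# Standalone check of the points_to_4d shape convention above: two zero
# columns prepended to the float-cast point coordinates give an [N, 4] tensor.
import paddle

pts = paddle.to_tensor([[1, 2], [3, 4]])
cols = paddle.zeros([pts.shape[0], 1], dtype=paddle.float32)
out = paddle.concat((cols, cols, paddle.cast(pts, 'float32')), axis=1)
print(out.shape)  # [2, 4]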
import os
from app_web import sg
from sendgrid.helpers.mail import *
import pdfkit
from models.user import User
from models.expense import Expense
from models.statement import Statement
from models.category import Category
import tempfile
import subprocess
from .aws_uploader import upload_image_to_s3
import datetime
from peewee import fn
from flask import render_template
def create_statement(month=None):
def _get_pdfkit_config():
if os.getenv('FLASK_ENV') == 'production':
WKHTMLTOPDF_CMD = subprocess.Popen(
['which', os.environ.get(
'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')],
stdout=subprocess.PIPE).communicate()[0].strip()
return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)
else:
return pdfkit.configuration()
def create_pdf(pdf_content, filename):
options = {
'margin-top': '10mm',
'margin-bottom': '10mm',
'margin-left': '10mm',
'margin-right': '10mm',
'page-size': 'A4',
'page-width': '210mm',
'page-height': '296mm'
}
pdf = pdfkit.from_string(
pdf_content, False, configuration=_get_pdfkit_config(), options=options)
temp_file = tempfile.TemporaryFile()
temp_file.filename = filename
temp_file.content_type = "application/pdf"
temp_file.write(pdf)
temp_file.seek(0)
return temp_file
if month == None :
year = datetime.datetime.now().year
full_month = datetime.date.today().strftime("%B %Y") # current month
short_month = datetime.date.today().strftime("%b")
else:
# '2020-12' convert to 'December 2020'
year_month = month.split('-') # ['2020','12']
year = int(year_month[0])
short_month = datetime.datetime(year, int(year_month[1]), 1).strftime("%b")
full_month = datetime.datetime(year, int(year_month[1]), 1).strftime("%B %Y")
# select all user from database
users = User.select()
# get all expenses to render in template
for user in users:
record = Statement.get_or_none(Statement.user==user.id, Statement.month==full_month)
if not record:
            expenses = Expense.select().where(Expense.cat.in_(user.categories), Expense.month == short_month, Expense.created_at.year == year).order_by(Expense.created_at.asc())
# ttl = Expense.select(fn.SUM(Expense.amount).alias('total')).where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year)
total = 0
for exp in expenses:
total += exp.amount
html = render_template('expenses/statement.html', expenses=expenses, total=total, month=str(full_month))
pdf_name = (user.username).replace(" ", "-").lower() + "-" + str(full_month).replace(" ", "-")
temp_file = create_pdf(html, pdf_name)
statement_url = upload_image_to_s3(user.id ,temp_file)
print(statement_url)
statement = Statement(user=user.id, exp_url=statement_url, month=full_month)
statement.save()
'''
Send monthly statement email
'''
# message = Mail(
# from_email="[email protected]",
# to_emails=user.email,
# subject=f"{month} Expenses Statement",
# html_content=Content("text/html", f"<h1>Dear {user.username},</h1><br/>Here is your expenses statement PDF.<br/><a href={statement_url}>{month} Statement<a><br/><h1>Jw</h1>")
# )
# try:
# response = sg.send(message)
# print(response.body)
# except Exception as e:
# print(str(e))
else:
print('already exist!')
|
normal
|
{
"blob_id": "55df8d13ddf28f7b0477329bee743471a0780f24",
"index": 3253,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_statement(month=None):\n\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], stdout=\n subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {'margin-top': '10mm', 'margin-bottom': '10mm',\n 'margin-left': '10mm', 'margin-right': '10mm', 'page-size':\n 'A4', 'page-width': '210mm', 'page-height': '296mm'}\n pdf = pdfkit.from_string(pdf_content, False, configuration=\n _get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = 'application/pdf'\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n if month == None:\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime('%B %Y')\n short_month = datetime.date.today().strftime('%b')\n else:\n year_month = month.split('-')\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%b')\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%B %Y')\n users = User.select()\n for user in users:\n record = Statement.get_or_none(Statement.user == user.id, Statement\n .month == full_month)\n if not record:\n expenses = Expense.select().where(Expense.cat in user.\n categories, Expense.month == short_month, Expense.\n created_at.year == year).order_by(Expense.created_at.asc())\n total = 0\n for exp in expenses:\n total += exp.amount\n html = render_template('expenses/statement.html', expenses=\n expenses, total=total, month=str(full_month))\n pdf_name = user.username.replace(' ', '-').lower() + '-' + str(\n full_month).replace(' ', '-')\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id, temp_file)\n print(statement_url)\n statement = Statement(user=user.id, exp_url=statement_url,\n month=full_month)\n statement.save()\n \"\"\"\n Send monthly statement email\n \"\"\"\n else:\n print('already exist!')\n",
"step-3": "import os\nfrom app_web import sg\nfrom sendgrid.helpers.mail import *\nimport pdfkit\nfrom models.user import User\nfrom models.expense import Expense\nfrom models.statement import Statement\nfrom models.category import Category\nimport tempfile\nimport subprocess\nfrom .aws_uploader import upload_image_to_s3\nimport datetime\nfrom peewee import fn\nfrom flask import render_template\n\n\ndef create_statement(month=None):\n\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], stdout=\n subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {'margin-top': '10mm', 'margin-bottom': '10mm',\n 'margin-left': '10mm', 'margin-right': '10mm', 'page-size':\n 'A4', 'page-width': '210mm', 'page-height': '296mm'}\n pdf = pdfkit.from_string(pdf_content, False, configuration=\n _get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = 'application/pdf'\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n if month == None:\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime('%B %Y')\n short_month = datetime.date.today().strftime('%b')\n else:\n year_month = month.split('-')\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%b')\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%B %Y')\n users = User.select()\n for user in users:\n record = Statement.get_or_none(Statement.user == user.id, Statement\n .month == full_month)\n if not record:\n expenses = Expense.select().where(Expense.cat in user.\n categories, Expense.month == short_month, Expense.\n created_at.year == year).order_by(Expense.created_at.asc())\n total = 0\n for exp in expenses:\n total += exp.amount\n html = render_template('expenses/statement.html', expenses=\n expenses, total=total, month=str(full_month))\n pdf_name = user.username.replace(' ', '-').lower() + '-' + str(\n full_month).replace(' ', '-')\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id, temp_file)\n print(statement_url)\n statement = Statement(user=user.id, exp_url=statement_url,\n month=full_month)\n statement.save()\n \"\"\"\n Send monthly statement email\n \"\"\"\n else:\n print('already exist!')\n",
"step-4": "import os\nfrom app_web import sg\nfrom sendgrid.helpers.mail import *\nimport pdfkit\nfrom models.user import User\nfrom models.expense import Expense\nfrom models.statement import Statement\nfrom models.category import Category\nimport tempfile\nimport subprocess\nfrom .aws_uploader import upload_image_to_s3\nimport datetime\nfrom peewee import fn\nfrom flask import render_template\n\ndef create_statement(month=None):\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(\n ['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')],\n stdout=subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {\n 'margin-top': '10mm',\n 'margin-bottom': '10mm',\n 'margin-left': '10mm',\n 'margin-right': '10mm',\n 'page-size': 'A4',\n 'page-width': '210mm',\n 'page-height': '296mm'\n }\n pdf = pdfkit.from_string(\n pdf_content, False, configuration=_get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = \"application/pdf\"\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n\n if month == None :\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime(\"%B %Y\") # current month\n short_month = datetime.date.today().strftime(\"%b\")\n else:\n # '2020-12' convert to 'December 2020'\n year_month = month.split('-') # ['2020','12']\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\"%b\")\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\"%B %Y\")\n\n # select all user from database\n users = User.select()\n # get all expenses to render in template\n for user in users:\n record = Statement.get_or_none(Statement.user==user.id, Statement.month==full_month)\n if not record:\n expenses = Expense.select().where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year).order_by(Expense.created_at.asc())\n # ttl = Expense.select(fn.SUM(Expense.amount).alias('total')).where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year)\n total = 0\n for exp in expenses:\n total += exp.amount\n\n html = render_template('expenses/statement.html', expenses=expenses, total=total, month=str(full_month))\n pdf_name = (user.username).replace(\" \", \"-\").lower() + \"-\" + str(full_month).replace(\" \", \"-\")\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id ,temp_file)\n print(statement_url)\n\n statement = Statement(user=user.id, exp_url=statement_url, month=full_month)\n statement.save()\n '''\n Send monthly statement email\n '''\n # message = Mail(\n # from_email=\"[email protected]\",\n # to_emails=user.email,\n # subject=f\"{month} Expenses Statement\",\n # html_content=Content(\"text/html\", f\"<h1>Dear {user.username},</h1><br/>Here is your expenses statement PDF.<br/><a href={statement_url}>{month} Statement<a><br/><h1>Jw</h1>\")\n # )\n # try:\n # response = sg.send(message)\n # print(response.body)\n # except Exception as e:\n # print(str(e))\n else:\n print('already exist!')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
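The pdfkit core of create_statement reduces to from_string with False as the output path, which makes pdfkit return the PDF bytes instead of writing a file. A minimal sketch, assuming wkhtmltopdf is installed and on PATH; the HTML and output filename are illustrative.

# Minimal sketch of the HTML-to-PDF step used above.
import pdfkit

options = {'page-size': 'A4', 'margin-top': '10mm', 'margin-bottom': '10mm'}
pdf_bytes = pdfkit.from_string('<h1>Monthly statement</h1>', False,
                               options=options)
with open('statement.pdf', 'wb') as f:  # illustrative output path
    f.write(pdf_bytes)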
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NoticiaForm(ModelForm):
class Meta:
model = Noticia
fields = ['idNoticia', 'resumen', 'titulo', 'categoria']
<|reserved_special_token_1|>
from django import forms
from django.forms import ModelForm
from .models import Noticia
class NoticiaForm(ModelForm):
class Meta:
model = Noticia
fields = ['idNoticia', 'resumen', 'titulo', 'categoria']
|
flexible
|
{
"blob_id": "e7a283e0e0e16e9adb415b26d724b2ee84c4f4f8",
"index": 1547,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NoticiaForm(ModelForm):\n\n\n class Meta:\n model = Noticia\n fields = ['idNoticia', 'resumen', 'titulo', 'categoria']\n",
"step-3": "from django import forms\nfrom django.forms import ModelForm\nfrom .models import Noticia\n\n\nclass NoticiaForm(ModelForm):\n\n\n class Meta:\n model = Noticia\n fields = ['idNoticia', 'resumen', 'titulo', 'categoria']\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
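A hedged usage sketch for the ModelForm above; this only runs inside a configured Django project with the Noticia model migrated, and the field values are placeholders.

# Bind and validate NoticiaForm; save() creates a Noticia row on success.
form = NoticiaForm(data={'idNoticia': 1, 'resumen': 'Resumen corto',
                         'titulo': 'Titulo', 'categoria': 1})
if form.is_valid():
    noticia = form.save()
else:
    print(form.errors)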
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def recoverpix(doc, item):
x = item[0]
s = item[1]
if s == 0:
return doc.extractImage(x)
def getimage(pix):
if pix.colorspace.n != 4:
return pix
tpix = fitz.Pixmap(fitz.csRGB, pix)
return tpix
pix1 = fitz.Pixmap(doc, x)
pix2 = fitz.Pixmap(doc, s)
if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and
pix2.n == 1):
pix2 = None
return getimage(pix1)
pix = fitz.Pixmap(pix1)
pix.setAlpha(pix2.samples)
pix1 = pix2 = None
return getimage(pix)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(fitz.__doc__)
if not tuple(map(int, fitz.version[0].split('.'))) >= (1, 13, 17):
raise SystemExit('require PyMuPDF v1.13.17+')
dimlimit = 100
relsize = 0.05
abssize = 2048
imgdir = 'images'
if not os.path.exists(imgdir):
os.mkdir(imgdir)
def recoverpix(doc, item):
x = item[0]
s = item[1]
if s == 0:
return doc.extractImage(x)
def getimage(pix):
if pix.colorspace.n != 4:
return pix
tpix = fitz.Pixmap(fitz.csRGB, pix)
return tpix
pix1 = fitz.Pixmap(doc, x)
pix2 = fitz.Pixmap(doc, s)
if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and
pix2.n == 1):
pix2 = None
return getimage(pix1)
pix = fitz.Pixmap(pix1)
pix.setAlpha(pix2.samples)
pix1 = pix2 = None
return getimage(pix)
fname = sys.argv[1] if len(sys.argv) == 2 else None
if not fname:
fname = sg.PopupGetFile('Select file:', title=
'PyMuPDF PDF Image Extraction')
if not fname:
raise SystemExit()
t0 = time.time()
doc = fitz.open(fname)
page_count = len(doc)
xreflist = []
imglist = []
for pno in range(page_count):
sg.QuickMeter('Extract Images', pno + 1, page_count,
'*** Scanning Pages ***')
il = doc.getPageImageList(pno)
imglist.extend([x[0] for x in il])
for img in il:
xref = img[0]
if xref in xreflist:
continue
width = img[2]
height = img[3]
if min(width, height) <= dimlimit:
continue
pix = recoverpix(doc, img)
if type(pix) is dict:
ext = pix['ext']
imgdata = pix['image']
n = pix['colorspace']
imgfile = os.path.join(imgdir, 'img-%i.%s' % (xref, ext))
else:
imgfile = os.path.join(imgdir, 'img-%i.png' % xref)
n = pix.n
imgdata = pix.getPNGData()
if len(imgdata) <= abssize:
continue
if len(imgdata) / (width * height * n) <= relsize:
continue
fout = open(imgfile, 'wb')
fout.write(imgdata)
fout.close()
xreflist.append(xref)
t1 = time.time()
imglist = list(set(imglist))
print(len(set(imglist)), 'images in total')
print(len(xreflist), 'images extracted')
print('total time %g sec' % (t1 - t0))
<|reserved_special_token_1|>
from __future__ import print_function
import os, sys, time
import fitz
import PySimpleGUI as sg
<|reserved_special_token_0|>
print(fitz.__doc__)
if not tuple(map(int, fitz.version[0].split('.'))) >= (1, 13, 17):
raise SystemExit('require PyMuPDF v1.13.17+')
dimlimit = 100
relsize = 0.05
abssize = 2048
imgdir = 'images'
if not os.path.exists(imgdir):
os.mkdir(imgdir)
def recoverpix(doc, item):
x = item[0]
s = item[1]
if s == 0:
return doc.extractImage(x)
def getimage(pix):
if pix.colorspace.n != 4:
return pix
tpix = fitz.Pixmap(fitz.csRGB, pix)
return tpix
pix1 = fitz.Pixmap(doc, x)
pix2 = fitz.Pixmap(doc, s)
if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and
pix2.n == 1):
pix2 = None
return getimage(pix1)
pix = fitz.Pixmap(pix1)
pix.setAlpha(pix2.samples)
pix1 = pix2 = None
return getimage(pix)
fname = sys.argv[1] if len(sys.argv) == 2 else None
if not fname:
fname = sg.PopupGetFile('Select file:', title=
'PyMuPDF PDF Image Extraction')
if not fname:
raise SystemExit()
t0 = time.time()
doc = fitz.open(fname)
page_count = len(doc)
xreflist = []
imglist = []
for pno in range(page_count):
sg.QuickMeter('Extract Images', pno + 1, page_count,
'*** Scanning Pages ***')
il = doc.getPageImageList(pno)
imglist.extend([x[0] for x in il])
for img in il:
xref = img[0]
if xref in xreflist:
continue
width = img[2]
height = img[3]
if min(width, height) <= dimlimit:
continue
pix = recoverpix(doc, img)
if type(pix) is dict:
ext = pix['ext']
imgdata = pix['image']
n = pix['colorspace']
imgfile = os.path.join(imgdir, 'img-%i.%s' % (xref, ext))
else:
imgfile = os.path.join(imgdir, 'img-%i.png' % xref)
n = pix.n
imgdata = pix.getPNGData()
if len(imgdata) <= abssize:
continue
if len(imgdata) / (width * height * n) <= relsize:
continue
fout = open(imgfile, 'wb')
fout.write(imgdata)
fout.close()
xreflist.append(xref)
t1 = time.time()
imglist = list(set(imglist))
print(len(set(imglist)), 'images in total')
print(len(xreflist), 'images extracted')
print('total time %g sec' % (t1 - t0))
<|reserved_special_token_1|>
from __future__ import print_function
import os, sys, time
import fitz
import PySimpleGUI as sg
"""
PyMuPDF utility
----------------
For a given entry in a page's getImageList() list, function "recoverpix"
returns either the raw image data, or a modified pixmap if an /SMask entry
exists.
The item's first two entries are PDF xref numbers. The first one is the image in
question, the second one may be 0 or the object id of a soft-image mask. In this
case, we assume it being a sequence of alpha bytes belonging to our image.
We then create a new Pixmap giving it these alpha values, and return it.
If the result pixmap is CMYK, it will be converted to RGB first.
"""
print(fitz.__doc__)
if not tuple(map(int, fitz.version[0].split("."))) >= (1, 13, 17):
raise SystemExit("require PyMuPDF v1.13.17+")
dimlimit = 100 # each image side must be greater than this
relsize = 0.05 # image : pixmap size ratio must be larger than this (5%)
abssize = 2048 # absolute image size limit 2 KB: ignore if smaller
imgdir = "images" # found images are stored in this subfolder
if not os.path.exists(imgdir):
os.mkdir(imgdir)
def recoverpix(doc, item):
x = item[0] # xref of PDF image
s = item[1] # xref of its /SMask
if s == 0: # no smask: use direct image output
return doc.extractImage(x)
def getimage(pix):
if pix.colorspace.n != 4:
return pix
tpix = fitz.Pixmap(fitz.csRGB, pix)
return tpix
# we need to reconstruct the alpha channel with the smask
pix1 = fitz.Pixmap(doc, x)
pix2 = fitz.Pixmap(doc, s) # create pixmap of the /SMask entry
# sanity check
if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and pix2.n == 1):
pix2 = None
return getimage(pix1)
pix = fitz.Pixmap(pix1) # copy of pix1, alpha channel added
pix.setAlpha(pix2.samples) # treat pix2.samples as alpha value
pix1 = pix2 = None # free temp pixmaps
# we may need to adjust something for CMYK pixmaps here:
return getimage(pix)
fname = sys.argv[1] if len(sys.argv) == 2 else None
if not fname:
fname = sg.PopupGetFile("Select file:", title="PyMuPDF PDF Image Extraction")
if not fname:
raise SystemExit()
t0 = time.time()
doc = fitz.open(fname)
page_count = len(doc) # number of pages
xreflist = []
imglist = []
for pno in range(page_count):
sg.QuickMeter(
"Extract Images", # show our progress
pno + 1,
page_count,
"*** Scanning Pages ***",
)
il = doc.getPageImageList(pno)
imglist.extend([x[0] for x in il])
for img in il:
xref = img[0]
if xref in xreflist:
continue
width = img[2]
height = img[3]
if min(width, height) <= dimlimit:
continue
pix = recoverpix(doc, img)
if type(pix) is dict: # we got a raw image
ext = pix["ext"]
imgdata = pix["image"]
n = pix["colorspace"]
imgfile = os.path.join(imgdir, "img-%i.%s" % (xref, ext))
else: # we got a pixmap
imgfile = os.path.join(imgdir, "img-%i.png" % xref)
n = pix.n
imgdata = pix.getPNGData()
if len(imgdata) <= abssize:
continue
if len(imgdata) / (width * height * n) <= relsize:
continue
fout = open(imgfile, "wb")
fout.write(imgdata)
fout.close()
xreflist.append(xref)
t1 = time.time()
imglist = list(set(imglist))
print(len(set(imglist)), "images in total")
print(len(xreflist), "images extracted")
print("total time %g sec" % (t1 - t0))
|
flexible
|
{
"blob_id": "856afd30a2ed01a1d44bbe91a7b69998e9a51bb7",
"index": 3170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(fitz.__doc__)\nif not tuple(map(int, fitz.version[0].split('.'))) >= (1, 13, 17):\n raise SystemExit('require PyMuPDF v1.13.17+')\ndimlimit = 100\nrelsize = 0.05\nabssize = 2048\nimgdir = 'images'\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile('Select file:', title=\n 'PyMuPDF PDF Image Extraction')\nif not fname:\n raise SystemExit()\nt0 = time.time()\ndoc = fitz.open(fname)\npage_count = len(doc)\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter('Extract Images', pno + 1, page_count,\n '*** Scanning Pages ***')\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict:\n ext = pix['ext']\n imgdata = pix['image']\n n = pix['colorspace']\n imgfile = os.path.join(imgdir, 'img-%i.%s' % (xref, ext))\n else:\n imgfile = os.path.join(imgdir, 'img-%i.png' % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n if len(imgdata) <= abssize:\n continue\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n fout = open(imgfile, 'wb')\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\nt1 = time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), 'images in total')\nprint(len(xreflist), 'images extracted')\nprint('total time %g sec' % (t1 - t0))\n",
"step-4": "from __future__ import print_function\nimport os, sys, time\nimport fitz\nimport PySimpleGUI as sg\n<mask token>\nprint(fitz.__doc__)\nif not tuple(map(int, fitz.version[0].split('.'))) >= (1, 13, 17):\n raise SystemExit('require PyMuPDF v1.13.17+')\ndimlimit = 100\nrelsize = 0.05\nabssize = 2048\nimgdir = 'images'\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile('Select file:', title=\n 'PyMuPDF PDF Image Extraction')\nif not fname:\n raise SystemExit()\nt0 = time.time()\ndoc = fitz.open(fname)\npage_count = len(doc)\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter('Extract Images', pno + 1, page_count,\n '*** Scanning Pages ***')\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict:\n ext = pix['ext']\n imgdata = pix['image']\n n = pix['colorspace']\n imgfile = os.path.join(imgdir, 'img-%i.%s' % (xref, ext))\n else:\n imgfile = os.path.join(imgdir, 'img-%i.png' % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n if len(imgdata) <= abssize:\n continue\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n fout = open(imgfile, 'wb')\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\nt1 = time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), 'images in total')\nprint(len(xreflist), 'images extracted')\nprint('total time %g sec' % (t1 - t0))\n",
"step-5": "from __future__ import print_function\nimport os, sys, time\nimport fitz\nimport PySimpleGUI as sg\n\n\"\"\"\nPyMuPDF utility\n----------------\nFor a given entry in a page's getImagleList() list, function \"recoverpix\"\nreturns either the raw image data, or a modified pixmap if an /SMask entry\nexists.\nThe item's first two entries are PDF xref numbers. The first one is the image in\nquestion, the second one may be 0 or the object id of a soft-image mask. In this\ncase, we assume it being a sequence of alpha bytes belonging to our image.\nWe then create a new Pixmap giving it these alpha values, and return it.\nIf the result pixmap is CMYK, it will be converted to RGB first.\n\"\"\"\nprint(fitz.__doc__)\n\nif not tuple(map(int, fitz.version[0].split(\".\"))) >= (1, 13, 17):\n raise SystemExit(\"require PyMuPDF v1.13.17+\")\n\ndimlimit = 100 # each image side must be greater than this\nrelsize = 0.05 # image : pixmap size ratio must be larger than this (5%)\nabssize = 2048 # absolute image size limit 2 KB: ignore if smaller\nimgdir = \"images\" # found images are stored in this subfolder\n\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0] # xref of PDF image\n s = item[1] # xref of its /SMask\n if s == 0: # no smask: use direct image output\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n\n # we need to reconstruct the alpha channel with the smask\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s) # create pixmap of the /SMask entry\n\n # sanity check\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n\n pix = fitz.Pixmap(pix1) # copy of pix1, alpha channel added\n pix.setAlpha(pix2.samples) # treat pix2.samples as alpha value\n pix1 = pix2 = None # free temp pixmaps\n\n # we may need to adjust something for CMYK pixmaps here:\n return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile(\"Select file:\", title=\"PyMuPDF PDF Image Extraction\")\nif not fname:\n raise SystemExit()\n\nt0 = time.time()\ndoc = fitz.open(fname)\n\npage_count = len(doc) # number of pages\n\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter(\n \"Extract Images\", # show our progress\n pno + 1,\n page_count,\n \"*** Scanning Pages ***\",\n )\n\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict: # we got a raw image\n ext = pix[\"ext\"]\n imgdata = pix[\"image\"]\n n = pix[\"colorspace\"]\n imgfile = os.path.join(imgdir, \"img-%i.%s\" % (xref, ext))\n else: # we got a pixmap\n imgfile = os.path.join(imgdir, \"img-%i.png\" % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n\n if len(imgdata) <= abssize:\n continue\n\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n\n fout = open(imgfile, \"wb\")\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\n\nt1 = time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), \"images in total\")\nprint(len(xreflist), \"images extracted\")\nprint(\"total time %g sec\" % (t1 - t0))",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
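A condensed sketch of the record's /SMask handling, mirroring the same pre-1.14 PyMuPDF calls the script pins (extractImage, setAlpha, getPNGData); doc is an opened fitz.Document and img one entry from getPageImageList, both assumed here. Current PyMuPDF renamed these to extract_image and Pixmap.tobytes, so the sketch tracks the record's API, not today's.

import fitz

def image_bytes(doc, img):
    xref, smask = img[0], img[1]
    if smask == 0:                    # no soft mask: raw embedded stream
        raw = doc.extractImage(xref)
        return raw["image"], raw["ext"]
    pix = fitz.Pixmap(doc, xref)      # base pixels
    mask = fitz.Pixmap(doc, smask)    # /SMask holds per-pixel alpha bytes
    pix = fitz.Pixmap(pix)            # copy with alpha channel, as in the record
    pix.setAlpha(mask.samples)
    if pix.colorspace.n == 4:         # CMYK must become RGB before PNG export
        pix = fitz.Pixmap(fitz.csRGB, pix)
    return pix.getPNGData(), "png"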
from django.apps import AppConfig
from django.conf import settings
import importlib
import importlib.util
class RestAdminAppConfig(AppConfig):
name = 'libraries.django_rest_admin'
verbose_name = 'Rest Admin'
loaded = False
def ready(self):
autodiscover()
def autodiscover():
"""
    Automatic discovery of a rest_admin.py file inside each installed app,
    similar to what Django admin does.
"""
from .register import rest_admin
if not RestAdminAppConfig.loaded:
for app in settings.INSTALLED_APPS:
# For each app, we need to look for an rest_admin.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for rest_admin.py on that path.
            # Step 1: find out the app's __path__. Import errors here will (and
# should) bubble up, but a missing __path__ (which is legal, but weird)
# fails silently -- apps that do weird things with __path__ might
# need to roll their own rest_admin registration.
try:
app_path = importlib.import_module(app).__path__
except AttributeError:
continue
            # Step 2: check that the app actually ships a rest_admin.py. The old
            # imp.find_module approach raised ImportError when the module was
            # missing without actually importing it; importlib.find_loader
            # performs the same existence check here, so skip the app if it
            # has no rest_admin.py.
# try:
# importlib.util.find_spec('rest_admin', app_path)
# # imp.find_module('rest_admin', app_path)
# except ImportError:
# continue
#
if not importlib.find_loader('rest_admin', app_path):
continue
# Step 3: import the app's admin file. If this has errors we want them
# to bubble up.
importlib.import_module("%s.rest_admin" % app)
# autodiscover was successful, reset loading flag.
RestAdminAppConfig.loaded = True
|
normal
|
{
"blob_id": "a41d00c86d0bdab1bced77c275e56c3569af4f4e",
"index": 921,
"step-1": "<mask token>\n\n\nclass RestAdminAppConfig(AppConfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RestAdminAppConfig(AppConfig):\n name = 'libraries.django_rest_admin'\n verbose_name = 'Rest Admin'\n loaded = False\n\n def ready(self):\n autodiscover()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RestAdminAppConfig(AppConfig):\n name = 'libraries.django_rest_admin'\n verbose_name = 'Rest Admin'\n loaded = False\n\n def ready(self):\n autodiscover()\n\n\ndef autodiscover():\n \"\"\"\n Automatic discovering of rest_admin.py file inside apps.\n similar to what Django admin does. \n \"\"\"\n from .register import rest_admin\n if not RestAdminAppConfig.loaded:\n for app in settings.INSTALLED_APPS:\n try:\n app_path = importlib.import_module(app).__path__\n except AttributeError:\n continue\n if not importlib.find_loader('rest_admin', app_path):\n continue\n importlib.import_module('%s.rest_admin' % app)\n RestAdminAppConfig.loaded = True\n",
"step-4": "from django.apps import AppConfig\nfrom django.conf import settings\nimport importlib\nimport importlib.util\n\n\nclass RestAdminAppConfig(AppConfig):\n name = 'libraries.django_rest_admin'\n verbose_name = 'Rest Admin'\n loaded = False\n\n def ready(self):\n autodiscover()\n\n\ndef autodiscover():\n \"\"\"\n Automatic discovering of rest_admin.py file inside apps.\n similar to what Django admin does. \n \"\"\"\n from .register import rest_admin\n if not RestAdminAppConfig.loaded:\n for app in settings.INSTALLED_APPS:\n try:\n app_path = importlib.import_module(app).__path__\n except AttributeError:\n continue\n if not importlib.find_loader('rest_admin', app_path):\n continue\n importlib.import_module('%s.rest_admin' % app)\n RestAdminAppConfig.loaded = True\n",
"step-5": "from django.apps import AppConfig\nfrom django.conf import settings\nimport importlib\nimport importlib.util\n\n\nclass RestAdminAppConfig(AppConfig):\n name = 'libraries.django_rest_admin'\n verbose_name = 'Rest Admin'\n loaded = False\n\n def ready(self):\n autodiscover()\n\n\ndef autodiscover():\n \"\"\"\n Automatic discovering of rest_admin.py file inside apps.\n similar to what Django admin does. \n \"\"\"\n from .register import rest_admin\n\n if not RestAdminAppConfig.loaded:\n for app in settings.INSTALLED_APPS:\n # For each app, we need to look for an rest_admin.py inside that app's\n # package. We can't use os.path here -- recall that modules may be\n # imported different ways (think zip files) -- so we need to get\n # the app's __path__ and look for rest_admin.py on that path.\n\n # Step 1: find out the app's __path__ Import errors here will (and\n # should) bubble up, but a missing __path__ (which is legal, but weird)\n # fails silently -- apps that do weird things with __path__ might\n # need to roll their own rest_admin registration.\n try:\n app_path = importlib.import_module(app).__path__\n except AttributeError:\n continue\n\n # Step 2: use imp.find_module to find the app's rest_admin.py. For some\n # reason imp.find_module raises ImportError if the app can't be found\n # but doesn't actually try to import the module. So skip this app if\n # its rest_admin.py doesn't exist\n # try:\n # importlib.util.find_spec('rest_admin', app_path)\n # # imp.find_module('rest_admin', app_path)\n # except ImportError:\n # continue\n #\n if not importlib.find_loader('rest_admin', app_path):\n continue\n\n # Step 3: import the app's admin file. If this has errors we want them\n # to bubble up.\n importlib.import_module(\"%s.rest_admin\" % app)\n\n # autodiscover was successful, reset loading flag.\n RestAdminAppConfig.loaded = True\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
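importlib.find_loader, used in the record above, is deprecated and removed in Python 3.12; a sketch of the same existence check with importlib.util.find_spec, assuming every entry in INSTALLED_APPS is itself importable (find_spec imports the parent package to resolve a dotted name).

import importlib
import importlib.util

def discover(installed_apps):
    for app in installed_apps:
        # find_spec returns None when "<app>.rest_admin" does not exist,
        # so missing modules are skipped instead of raising ImportError.
        if importlib.util.find_spec(f"{app}.rest_admin") is None:
            continue
        importlib.import_module(f"{app}.rest_admin")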
import re
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search(r'initial state:\s([\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile(r'([\.#]{5})\s=>\s([\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state)-3:
curr_str = initial_state[i-2:i+3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
state, rules = read_input()
start = 0
for c in state:
if c == '#':
break
start += 1
gen = 0
start_idx = -2
previos = sum_plants(state, start)
prev_diff = 0
same_diff_count = 0
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total-previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
b = total - diff*gen
solution = diff * 50000000000 + b
print(solution)
|
normal
|
{
"blob_id": "27f001f4e79291825c56642693894375fef3e66a",
"index": 1647,
"step-1": "<mask token>\n\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search('initial state:\\\\s([\\\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile('([\\\\.#]{5})\\\\s=>\\\\s([\\\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state) - 3:\n curr_str = initial_state[i - 2:i + 3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search('initial state:\\\\s([\\\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile('([\\\\.#]{5})\\\\s=>\\\\s([\\\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state) - 3:\n curr_str = initial_state[i - 2:i + 3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\n\n<mask token>\nfor c in state:\n if c == '#':\n break\n start += 1\n<mask token>\nwhile gen < 1000:\n start, state = apply_gen(state, rules, start)\n total = sum_plants(state, start)\n diff = total - previos\n gen += 1\n if diff == prev_diff:\n same_diff_count += 1\n if same_diff_count == 100:\n break\n previos = total\n prev_diff = diff\n<mask token>\nprint(solution)\n",
"step-3": "<mask token>\n\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search('initial state:\\\\s([\\\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile('([\\\\.#]{5})\\\\s=>\\\\s([\\\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state) - 3:\n curr_str = initial_state[i - 2:i + 3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\n\nstate, rules = read_input()\nstart = 0\nfor c in state:\n if c == '#':\n break\n start += 1\ngen = 0\nstart_idx = -2\nprevios = sum_plants(state, start)\nprev_diff = 0\nsame_diff_count = 0\nwhile gen < 1000:\n start, state = apply_gen(state, rules, start)\n total = sum_plants(state, start)\n diff = total - previos\n gen += 1\n if diff == prev_diff:\n same_diff_count += 1\n if same_diff_count == 100:\n break\n previos = total\n prev_diff = diff\nb = total - diff * gen\nsolution = diff * 50000000000 + b\nprint(solution)\n",
"step-4": "import re\n\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search('initial state:\\\\s([\\\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile('([\\\\.#]{5})\\\\s=>\\\\s([\\\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state) - 3:\n curr_str = initial_state[i - 2:i + 3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\n\nstate, rules = read_input()\nstart = 0\nfor c in state:\n if c == '#':\n break\n start += 1\ngen = 0\nstart_idx = -2\nprevios = sum_plants(state, start)\nprev_diff = 0\nsame_diff_count = 0\nwhile gen < 1000:\n start, state = apply_gen(state, rules, start)\n total = sum_plants(state, start)\n diff = total - previos\n gen += 1\n if diff == prev_diff:\n same_diff_count += 1\n if same_diff_count == 100:\n break\n previos = total\n prev_diff = diff\nb = total - diff * gen\nsolution = diff * 50000000000 + b\nprint(solution)\n",
"step-5": "import re\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search(r'initial state:\\s([\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile(r'([\\.#]{5})\\s=>\\s([\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state)-3:\n curr_str = initial_state[i-2:i+3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\nstate, rules = read_input()\nstart = 0\nfor c in state:\n if c == '#':\n break\n start += 1\ngen = 0\nstart_idx = -2\nprevios = sum_plants(state, start)\nprev_diff = 0\nsame_diff_count = 0\nwhile gen < 1000:\n start, state = apply_gen(state, rules, start)\n total = sum_plants(state, start)\n diff = total-previos\n gen += 1\n if diff == prev_diff:\n same_diff_count += 1\n if same_diff_count == 100:\n break\n previos = total\n prev_diff = diff\nb = total - diff*gen\nsolution = diff * 50000000000 + b\nprint(solution)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
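The closing arithmetic in the record works because the plant sum settles into an arithmetic progression: once the per-generation difference diff stops changing, total(n) = diff * n + b with b = total - diff * gen. A tiny self-check of that extrapolation on made-up numbers:

diff, gen, total = 42, 120, 6000      # hypothetical stabilized values
b = total - diff * gen                # intercept of the line
assert diff * gen + b == total        # the line passes through (gen, total)
print(diff * 50_000_000_000 + b)      # extrapolated sum at generation 5e10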
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for x in a:
b.append(int(x))
print(b)
<|reserved_special_token_0|>
for i in range(l):
s = len(b[:i])
for j in range(s):
if b[s] < b[j]:
c = b[s]
b.pop(s)
b.insert(b.index(b[j]), c)
print(b, b[:i], b[s])
<|reserved_special_token_1|>
a = input('Enter number')
a = a.split()
b = []
for x in a:
b.append(int(x))
print(b)
l = len(b)
c = 0
s = 0
for i in range(l):
s = len(b[:i])
for j in range(s):
if b[s] < b[j]:
c = b[s]
b.pop(s)
b.insert(b.index(b[j]), c)
print(b, b[:i], b[s])
<|reserved_special_token_1|>
a= input("Enter number")
a= a.split()
b=[]
for x in a:
b.append(int(x))
print(b)
l=len(b)
c=0
s=0
for i in range(l):
s=len(b[:i])
for j in range(s):
if b[s]<b[j]:
c=b[s]
b.pop(s)
b.insert(b.index(b[j]),c)
print(b,b[:i],b[s])
|
flexible
|
{
"blob_id": "24de4f486d4e976850e94a003f8d9cbe3e518402",
"index": 33,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in a:\n b.append(int(x))\nprint(b)\n<mask token>\nfor i in range(l):\n s = len(b[:i])\n for j in range(s):\n if b[s] < b[j]:\n c = b[s]\n b.pop(s)\n b.insert(b.index(b[j]), c)\n print(b, b[:i], b[s])\n",
"step-3": "a = input('Enter number')\na = a.split()\nb = []\nfor x in a:\n b.append(int(x))\nprint(b)\nl = len(b)\nc = 0\ns = 0\nfor i in range(l):\n s = len(b[:i])\n for j in range(s):\n if b[s] < b[j]:\n c = b[s]\n b.pop(s)\n b.insert(b.index(b[j]), c)\n print(b, b[:i], b[s])\n",
"step-4": "a= input(\"Enter number\")\r\na= a.split()\r\nb=[]\r\nfor x in a:\r\n b.append(int(x)) \r\n\r\nprint(b)\r\nl=len(b)\r\nc=0\r\ns=0\r\nfor i in range(l):\r\n s=len(b[:i])\r\n for j in range(s):\r\n \r\n if b[s]<b[j]:\r\n c=b[s]\r\n b.pop(s)\r\n b.insert(b.index(b[j]),c)\r\n print(b,b[:i],b[s])\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
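The manual sort in the record above mis-handles duplicate values, since b.index(b[j]) always returns the first position holding that value, and it keeps comparing after the insert has already shifted elements. A standard in-place insertion sort over the same parsed list avoids lookups by value entirely (the input literal is a stand-in):

b = [5, 3, 5, 1, 4]             # stand-in for the parsed input
for i in range(1, len(b)):
    c = b[i]
    j = i - 1
    while j >= 0 and b[j] > c:  # shift larger items one slot right
        b[j + 1] = b[j]
        j -= 1
    b[j + 1] = c                # drop c into its slot
print(b)                        # [1, 3, 4, 5, 5]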
from manimlib.imports import *
class A_Scroller(Scene):
CONFIG={
"camera_config":{"background_color":"#FFFFFF"}
}
def construct(self):
text_1 = Text("3493", color="#DC3832")
text_2 = Text("3646", color="#221F20").shift(2*RIGHT)
text_3 = Text("4182", color="#2566AD").shift(4*RIGHT)
text_4 = Text("16417", color="#DC3832").shift(6*RIGHT)
text_5 = Text("18209", color="#221F20").shift(8*RIGHT)
text_6 = Text("18569", color="#2566AD").shift(10*RIGHT)
text_7 = Text("22229", color="#DC3832").shift(12*RIGHT)
text_8 = Text("24928", color="#221F20").shift(14*RIGHT)
text_9 = Text("26827", color="#2566AD").shift(16*RIGHT)
text_10 = Text("29779", color="#DC3832").shift(18*RIGHT)
line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6, text_7, text_8, text_9, text_10)
text_11 = Text("30898", color="#221F20").shift(DOWN)
text_12 = Text("31568", color="#2566AD").shift(2*RIGHT+DOWN)
text_13 = Text("32075", color="#DC3832").shift(4*RIGHT+DOWN)
text_14 = Text("32777", color="#221F20").shift(6*RIGHT+DOWN)
text_15 = Text("33959", color="#2566AD").shift(8*RIGHT+DOWN)
text_16 = Text("35450", color="#DC3832").shift(10*RIGHT+DOWN)
text_17 = Text("37680", color="#221F20").shift(12*RIGHT+DOWN)
text_18 = Text("38268", color="#2566AD").shift(14*RIGHT+DOWN)
text_19 = Text("38269", color="#DC3832").shift(16*RIGHT+DOWN)
text_20 = Text("38849", color="#221F20").shift(18*RIGHT+DOWN)
line_2 = VGroup(text_11, text_12, text_13, text_14, text_15, text_16, text_17, text_18, text_19, text_20)
text_21 = Text("44204", color="#2566AD").shift(2*DOWN)
text_22 = Text("44798", color="#DC3832").shift(2*RIGHT+2*DOWN)
text_23 = Text("44814", color="#221F20").shift(4*RIGHT+2*DOWN)
text_24 = Text("45084", color="#2566AD").shift(6*RIGHT+2*DOWN)
text_25 = Text("45252", color="#DC3832").shift(8*RIGHT+2*DOWN)
text_26 = Text("46041", color="#221F20").shift(10*RIGHT+2*DOWN)
text_27 = Text("46380", color="#2566AD").shift(12*RIGHT+2*DOWN)
text_28 = Text("47891", color="#DC3832").shift(14*RIGHT+2*DOWN)
text_29 = Text("51126", color="#221F20").shift(16*RIGHT+2*DOWN)
text_30 = Text("51599", color="#2566AD").shift(18*RIGHT+2*DOWN)
line_3 = VGroup(text_21, text_22, text_23, text_24, text_25, text_26, text_27, text_28, text_29, text_30)
all_numbers_1 = VGroup(line_1, line_2, line_3)
all_numbers_2 = all_numbers_1.copy()
all_numbers_1.move_to(2*UP).shift(20*RIGHT)
all_numbers_2.move_to(2*UP)
all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)
self.add(all_numbers)
self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10, rate_func=linear)
|
normal
|
{
"blob_id": "97c97f18d1b93dc54538a0df7badafd961fdcb9c",
"index": 3588,
"step-1": "<mask token>\n\n\nclass A_Scroller(Scene):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass A_Scroller(Scene):\n <mask token>\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)\n text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-3": "<mask token>\n\n\nclass A_Scroller(Scene):\n CONFIG = {'camera_config': {'background_color': '#FFFFFF'}}\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)\n text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-4": "from manimlib.imports import *\n\n\nclass A_Scroller(Scene):\n CONFIG = {'camera_config': {'background_color': '#FFFFFF'}}\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)\n text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-5": "from manimlib.imports import *\n\nclass A_Scroller(Scene):\n CONFIG={\n \"camera_config\":{\"background_color\":\"#FFFFFF\"}\n }\n def construct(self):\n text_1 = Text(\"3493\", color=\"#DC3832\")\n text_2 = Text(\"3646\", color=\"#221F20\").shift(2*RIGHT)\n text_3 = Text(\"4182\", color=\"#2566AD\").shift(4*RIGHT)\n text_4 = Text(\"16417\", color=\"#DC3832\").shift(6*RIGHT)\n text_5 = Text(\"18209\", color=\"#221F20\").shift(8*RIGHT)\n text_6 = Text(\"18569\", color=\"#2566AD\").shift(10*RIGHT)\n text_7 = Text(\"22229\", color=\"#DC3832\").shift(12*RIGHT)\n text_8 = Text(\"24928\", color=\"#221F20\").shift(14*RIGHT)\n text_9 = Text(\"26827\", color=\"#2566AD\").shift(16*RIGHT)\n text_10 = Text(\"29779\", color=\"#DC3832\").shift(18*RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6, text_7, text_8, text_9, text_10)\n\n text_11 = Text(\"30898\", color=\"#221F20\").shift(DOWN)\n text_12 = Text(\"31568\", color=\"#2566AD\").shift(2*RIGHT+DOWN)\n text_13 = Text(\"32075\", color=\"#DC3832\").shift(4*RIGHT+DOWN)\n text_14 = Text(\"32777\", color=\"#221F20\").shift(6*RIGHT+DOWN)\n text_15 = Text(\"33959\", color=\"#2566AD\").shift(8*RIGHT+DOWN)\n text_16 = Text(\"35450\", color=\"#DC3832\").shift(10*RIGHT+DOWN)\n text_17 = Text(\"37680\", color=\"#221F20\").shift(12*RIGHT+DOWN)\n text_18 = Text(\"38268\", color=\"#2566AD\").shift(14*RIGHT+DOWN)\n text_19 = Text(\"38269\", color=\"#DC3832\").shift(16*RIGHT+DOWN)\n text_20 = Text(\"38849\", color=\"#221F20\").shift(18*RIGHT+DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15, text_16, text_17, text_18, text_19, text_20)\n\n text_21 = Text(\"44204\", color=\"#2566AD\").shift(2*DOWN)\n text_22 = Text(\"44798\", color=\"#DC3832\").shift(2*RIGHT+2*DOWN)\n text_23 = Text(\"44814\", color=\"#221F20\").shift(4*RIGHT+2*DOWN)\n text_24 = Text(\"45084\", color=\"#2566AD\").shift(6*RIGHT+2*DOWN)\n text_25 = Text(\"45252\", color=\"#DC3832\").shift(8*RIGHT+2*DOWN)\n text_26 = Text(\"46041\", color=\"#221F20\").shift(10*RIGHT+2*DOWN)\n text_27 = Text(\"46380\", color=\"#2566AD\").shift(12*RIGHT+2*DOWN)\n text_28 = Text(\"47891\", color=\"#DC3832\").shift(14*RIGHT+2*DOWN)\n text_29 = Text(\"51126\", color=\"#221F20\").shift(16*RIGHT+2*DOWN)\n text_30 = Text(\"51599\", color=\"#2566AD\").shift(18*RIGHT+2*DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25, text_26, text_27, text_28, text_29, text_30)\n\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2*UP).shift(20*RIGHT)\n all_numbers_2.move_to(2*UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10, rate_func=linear)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
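The scroller above reduces to three moves: lay the numbers on a wide grid, append a copy of the strip so the slide has material to its right, then shift the whole group with a linear rate function. A compact sketch of the same construction with placeholder values and the record's manimlib API:

from manimlib.imports import *

class CompactScroller(Scene):
    def construct(self):
        values = ["3493", "3646", "4182"]             # placeholder numbers
        colors = ["#DC3832", "#221F20", "#2566AD"]
        line = VGroup(*[Text(v, color=c).shift(2 * k * RIGHT)
                        for k, (v, c) in enumerate(zip(values, colors))])
        # a second copy parked one strip-width to the right hides the seam
        strip = VGroup(line, line.copy().shift(2 * len(values) * RIGHT))
        strip.to_edge(LEFT)
        self.play(ApplyMethod(strip.to_edge, RIGHT),
                  run_time=10, rate_func=linear)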
"""Defines all Rady URL."""
from django.conf.urls import url, include
from django.contrib import admin
apiv1_urls = [
url(r"^users/", include("user.urls")),
url(r"^meetings/", include("meeting.urls")),
url(r"^docs/", include("rest_framework_docs.urls")),
url(r"^auth/", include("auth.urls")),
url(r"^fcm/devices/", include("device.urls")),
url(r"^statistics/", include("stats.urls")),
url(r"^admin/", include("admin.urls")),
]
urlpatterns = [
url(r"^api/v1/", include(apiv1_urls)),
url(r"^admin/", admin.site.urls),
]
|
normal
|
{
"blob_id": "aa00e4569aeae58e3f0ea1a8326e35c0776f7727",
"index": 4849,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napiv1_urls = [url('^users/', include('user.urls')), url('^meetings/',\n include('meeting.urls')), url('^docs/', include(\n 'rest_framework_docs.urls')), url('^auth/', include('auth.urls')), url(\n '^fcm/devices/', include('device.urls')), url('^statistics/', include(\n 'stats.urls')), url('^admin/', include('admin.urls'))]\nurlpatterns = [url('^api/v1/', include(apiv1_urls)), url('^admin/', admin.\n site.urls)]\n",
"step-3": "<mask token>\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\napiv1_urls = [url('^users/', include('user.urls')), url('^meetings/',\n include('meeting.urls')), url('^docs/', include(\n 'rest_framework_docs.urls')), url('^auth/', include('auth.urls')), url(\n '^fcm/devices/', include('device.urls')), url('^statistics/', include(\n 'stats.urls')), url('^admin/', include('admin.urls'))]\nurlpatterns = [url('^api/v1/', include(apiv1_urls)), url('^admin/', admin.\n site.urls)]\n",
"step-4": "\"\"\"Defines all Rady URL.\"\"\"\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\n\napiv1_urls = [\n url(r\"^users/\", include(\"user.urls\")),\n url(r\"^meetings/\", include(\"meeting.urls\")),\n url(r\"^docs/\", include(\"rest_framework_docs.urls\")),\n url(r\"^auth/\", include(\"auth.urls\")),\n url(r\"^fcm/devices/\", include(\"device.urls\")),\n url(r\"^statistics/\", include(\"stats.urls\")),\n url(r\"^admin/\", include(\"admin.urls\")),\n]\n\nurlpatterns = [\n url(r\"^api/v1/\", include(apiv1_urls)),\n url(r\"^admin/\", admin.site.urls),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
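On Django 2.0+ the same table is usually written with django.urls.path, which drops the regex anchors; a sketch of the equivalent routing, trimmed to two of the record's includes:

from django.contrib import admin
from django.urls import include, path

apiv1_urls = [
    path("users/", include("user.urls")),
    path("meetings/", include("meeting.urls")),
]

urlpatterns = [
    path("api/v1/", include(apiv1_urls)),
    path("admin/", admin.site.urls),
]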
import unittest
import userinput
class Testing(unittest.TestCase):
def test_creation(self):
x = userinput.UserInput()
self.assertNotEqual(x, None)
def test_charset_initialization(self):
x = userinput.UserInput()
self.assertEqual(x.character_set, userinput.CHARACTERS)
def test_charset_display(self):
x = userinput.UserInput()
self.assertEqual(str(x.character_set), str(x.display_characters()))
def test_charset_remove(self):
x = userinput.UserInput()
# my favourite character :)
x.remove_character('پ')
self.assertNotIn('پ', x.character_set)
def test_charset_remove_missing(self):
x = userinput.UserInput()
# my favourite character :)
try:
x.remove_character('+')
self.assertFalse(False)
except KeyError:
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "4745d81558130440d35d277b586572f5d3f85c06",
"index": 7366,
"step-1": "<mask token>\n\n\nclass Testing(unittest.TestCase):\n\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Testing(unittest.TestCase):\n\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n <mask token>\n\n def test_charset_remove_missing(self):\n x = userinput.UserInput()\n try:\n x.remove_character('+')\n self.assertFalse(False)\n except KeyError:\n self.assertTrue(True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Testing(unittest.TestCase):\n\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n\n def test_charset_remove(self):\n x = userinput.UserInput()\n x.remove_character('پ')\n self.assertNotIn('پ', x.character_set)\n\n def test_charset_remove_missing(self):\n x = userinput.UserInput()\n try:\n x.remove_character('+')\n self.assertFalse(False)\n except KeyError:\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport userinput\n\n\nclass Testing(unittest.TestCase):\n\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n\n def test_charset_remove(self):\n x = userinput.UserInput()\n x.remove_character('پ')\n self.assertNotIn('پ', x.character_set)\n\n def test_charset_remove_missing(self):\n x = userinput.UserInput()\n try:\n x.remove_character('+')\n self.assertFalse(False)\n except KeyError:\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport userinput\n\n\nclass Testing(unittest.TestCase):\n def test_creation(self):\n x = userinput.UserInput()\n self.assertNotEqual(x, None)\n\n def test_charset_initialization(self):\n x = userinput.UserInput()\n self.assertEqual(x.character_set, userinput.CHARACTERS)\n\n def test_charset_display(self):\n x = userinput.UserInput()\n self.assertEqual(str(x.character_set), str(x.display_characters()))\n\n def test_charset_remove(self):\n x = userinput.UserInput()\n # my favourite character :)\n x.remove_character('پ')\n self.assertNotIn('پ', x.character_set)\n\n def test_charset_remove_missing(self):\n x = userinput.UserInput()\n # my favourite character :)\n try:\n x.remove_character('+')\n self.assertFalse(False)\n except KeyError:\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
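In the record above, test_charset_remove_missing can never fail when no exception is raised, because self.assertFalse(False) always passes; unittest's assertRaises context manager states the intent directly. A sketch of the tighter form, assuming the same userinput module:

import unittest

import userinput

class TestingRaises(unittest.TestCase):
    def test_charset_remove_missing(self):
        x = userinput.UserInput()
        with self.assertRaises(KeyError):  # fails unless KeyError is raised
            x.remove_character('+')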
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for kk in file:
paragraph += kk[0]
f.close()
<|reserved_special_token_0|>
print('most commons below...')
print(most_common_words)
<|reserved_special_token_0|>
for i, j in most_common_words:
most_cm_1.append(i)
<|reserved_special_token_0|>
for i in file:
filtered = ''
filtered_from_stopWords = ''
counter = 0
for j in range(len(illegal_chars)):
if counter == 0:
counter += 1
filtered = i[0].replace(illegal_chars[j], '')
else:
filtered = filtered.replace(illegal_chars[j], '')
counter = 0
filteredArr = filtered.split(' ')
for x in filteredArr:
if x not in stopWords:
filtered_from_stopWords += x + ' '
bb = []
filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')
features = {w.lower(): (w in most_cm_1) for w in
filtered_from_stopWords_ARRAY}
bb.append(features)
bb.append(i[1])
sentences.append(bb)
remarks.append(i[1])
<|reserved_special_token_0|>
print(remarks)
print(sentences)
<|reserved_special_token_0|>
print(classifier.classify(entry))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
f = open('trolls.csv', 'r')
file = csv.reader(f)
sentences = []
remarks = []
psObject = PorterStemmer()
illegal_chars = ['.', ',', '@', "'", '+', '-', '*']
paragraph = ''
for kk in file:
paragraph += kk[0]
f.close()
f = open('trolls.csv', 'r')
file = csv.reader(f)
all_words = word_tokenize(paragraph)
all2 = FreqDist(all_words)
most_common_words = list(all2.most_common(100))
print('most commons below...')
print(most_common_words)
most_cm_1 = []
for i, j in most_common_words:
most_cm_1.append(i)
stopWords = stopwords.words('english')
all_words = []
for i in file:
filtered = ''
filtered_from_stopWords = ''
counter = 0
for j in range(len(illegal_chars)):
if counter == 0:
counter += 1
filtered = i[0].replace(illegal_chars[j], '')
else:
filtered = filtered.replace(illegal_chars[j], '')
counter = 0
filteredArr = filtered.split(' ')
for x in filteredArr:
if x not in stopWords:
filtered_from_stopWords += x + ' '
bb = []
filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')
features = {w.lower(): (w in most_cm_1) for w in
filtered_from_stopWords_ARRAY}
bb.append(features)
bb.append(i[1])
sentences.append(bb)
remarks.append(i[1])
count = 0
print(remarks)
print(sentences)
classifier = NaiveBayesClassifier.train(sentences)
inputs = input('Enter a comment ')
words_entered = inputs.split(' ')
entry = {w: (True) for w in words_entered}
print(classifier.classify(entry))
<|reserved_special_token_1|>
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.classify import NaiveBayesClassifier
from nltk.probability import FreqDist
import csv
f = open('trolls.csv', 'r')
file = csv.reader(f)
sentences = []
remarks = []
psObject = PorterStemmer()
illegal_chars = ['.', ',', '@', "'", '+', '-', '*']
paragraph = ''
for kk in file:
paragraph += kk[0]
f.close()
f = open('trolls.csv', 'r')
file = csv.reader(f)
all_words = word_tokenize(paragraph)
all2 = FreqDist(all_words)
most_common_words = list(all2.most_common(100))
print('most commons below...')
print(most_common_words)
most_cm_1 = []
for i, j in most_common_words:
most_cm_1.append(i)
stopWords = stopwords.words('english')
all_words = []
for i in file:
filtered = ''
filtered_from_stopWords = ''
counter = 0
for j in range(len(illegal_chars)):
if counter == 0:
counter += 1
filtered = i[0].replace(illegal_chars[j], '')
else:
filtered = filtered.replace(illegal_chars[j], '')
counter = 0
filteredArr = filtered.split(' ')
for x in filteredArr:
if x not in stopWords:
filtered_from_stopWords += x + ' '
bb = []
filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')
features = {w.lower(): (w in most_cm_1) for w in
filtered_from_stopWords_ARRAY}
bb.append(features)
bb.append(i[1])
sentences.append(bb)
remarks.append(i[1])
count = 0
print(remarks)
print(sentences)
classifier = NaiveBayesClassifier.train(sentences)
inputs = input('Enter a comment ')
words_entered = inputs.split(' ')
entry = {w: (True) for w in words_entered}
print(classifier.classify(entry))
<|reserved_special_token_1|>
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.classify import NaiveBayesClassifier
from nltk.probability import FreqDist
import csv
f = open('trolls.csv', 'r')
file = csv.reader(f)
sentences=[]
remarks=[]
psObject = PorterStemmer()
illegal_chars = [
'.',',','@',"'",'+','-','*',
]
paragraph=''
for kk in file :
paragraph+=kk[0]
f.close()
f = open('trolls.csv', 'r')
file = csv.reader(f)
all_words = word_tokenize(paragraph)
# print(all_words)
all2 = FreqDist(all_words)
most_common_words = list(all2.most_common(100))
print('most commons below...')
print(most_common_words)
most_cm_1=[]
for i,j in most_common_words:
most_cm_1.append(i)
# print(most_cm_1)
stopWords = stopwords.words('english')
all_words = []
for i in file :
filtered=''
filtered_from_stopWords=''
counter = 0
for j in range(len(illegal_chars)) :
if counter == 0:
counter+=1
filtered = i[0].replace(illegal_chars[j], '')
else :
filtered=filtered.replace(illegal_chars[j],'')
counter=0
filteredArr = filtered.split(' ')
for x in filteredArr :
if x not in stopWords :
filtered_from_stopWords+=x+' '
bb=[]
filtered_from_stopWords_ARRAY=filtered_from_stopWords.split(' ')
features = {w.lower(): (w in most_cm_1) for w in filtered_from_stopWords_ARRAY}
bb.append(features)
bb.append(i[1])
sentences.append(bb)
remarks.append(i[1])
count =0
print(remarks)
print(sentences)
classifier = NaiveBayesClassifier.train(sentences)
inputs = input('Enter a comment ')
words_entered=inputs.split(' ')
entry = {w: ( True) for w in words_entered}
print(classifier.classify(entry))
|
flexible
|
{
"blob_id": "0dbdd7f7adffed850f126a2054c764b421c6ab84",
"index": 6799,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor kk in file:\n paragraph += kk[0]\nf.close()\n<mask token>\nprint('most commons below...')\nprint(most_common_words)\n<mask token>\nfor i, j in most_common_words:\n most_cm_1.append(i)\n<mask token>\nfor i in file:\n filtered = ''\n filtered_from_stopWords = ''\n counter = 0\n for j in range(len(illegal_chars)):\n if counter == 0:\n counter += 1\n filtered = i[0].replace(illegal_chars[j], '')\n else:\n filtered = filtered.replace(illegal_chars[j], '')\n counter = 0\n filteredArr = filtered.split(' ')\n for x in filteredArr:\n if x not in stopWords:\n filtered_from_stopWords += x + ' '\n bb = []\n filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')\n features = {w.lower(): (w in most_cm_1) for w in\n filtered_from_stopWords_ARRAY}\n bb.append(features)\n bb.append(i[1])\n sentences.append(bb)\n remarks.append(i[1])\n<mask token>\nprint(remarks)\nprint(sentences)\n<mask token>\nprint(classifier.classify(entry))\n",
"step-3": "<mask token>\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nsentences = []\nremarks = []\npsObject = PorterStemmer()\nillegal_chars = ['.', ',', '@', \"'\", '+', '-', '*']\nparagraph = ''\nfor kk in file:\n paragraph += kk[0]\nf.close()\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nall_words = word_tokenize(paragraph)\nall2 = FreqDist(all_words)\nmost_common_words = list(all2.most_common(100))\nprint('most commons below...')\nprint(most_common_words)\nmost_cm_1 = []\nfor i, j in most_common_words:\n most_cm_1.append(i)\nstopWords = stopwords.words('english')\nall_words = []\nfor i in file:\n filtered = ''\n filtered_from_stopWords = ''\n counter = 0\n for j in range(len(illegal_chars)):\n if counter == 0:\n counter += 1\n filtered = i[0].replace(illegal_chars[j], '')\n else:\n filtered = filtered.replace(illegal_chars[j], '')\n counter = 0\n filteredArr = filtered.split(' ')\n for x in filteredArr:\n if x not in stopWords:\n filtered_from_stopWords += x + ' '\n bb = []\n filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')\n features = {w.lower(): (w in most_cm_1) for w in\n filtered_from_stopWords_ARRAY}\n bb.append(features)\n bb.append(i[1])\n sentences.append(bb)\n remarks.append(i[1])\ncount = 0\nprint(remarks)\nprint(sentences)\nclassifier = NaiveBayesClassifier.train(sentences)\ninputs = input('Enter a comment ')\nwords_entered = inputs.split(' ')\nentry = {w: (True) for w in words_entered}\nprint(classifier.classify(entry))\n",
"step-4": "from nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.probability import FreqDist\nimport csv\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nsentences = []\nremarks = []\npsObject = PorterStemmer()\nillegal_chars = ['.', ',', '@', \"'\", '+', '-', '*']\nparagraph = ''\nfor kk in file:\n paragraph += kk[0]\nf.close()\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nall_words = word_tokenize(paragraph)\nall2 = FreqDist(all_words)\nmost_common_words = list(all2.most_common(100))\nprint('most commons below...')\nprint(most_common_words)\nmost_cm_1 = []\nfor i, j in most_common_words:\n most_cm_1.append(i)\nstopWords = stopwords.words('english')\nall_words = []\nfor i in file:\n filtered = ''\n filtered_from_stopWords = ''\n counter = 0\n for j in range(len(illegal_chars)):\n if counter == 0:\n counter += 1\n filtered = i[0].replace(illegal_chars[j], '')\n else:\n filtered = filtered.replace(illegal_chars[j], '')\n counter = 0\n filteredArr = filtered.split(' ')\n for x in filteredArr:\n if x not in stopWords:\n filtered_from_stopWords += x + ' '\n bb = []\n filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')\n features = {w.lower(): (w in most_cm_1) for w in\n filtered_from_stopWords_ARRAY}\n bb.append(features)\n bb.append(i[1])\n sentences.append(bb)\n remarks.append(i[1])\ncount = 0\nprint(remarks)\nprint(sentences)\nclassifier = NaiveBayesClassifier.train(sentences)\ninputs = input('Enter a comment ')\nwords_entered = inputs.split(' ')\nentry = {w: (True) for w in words_entered}\nprint(classifier.classify(entry))\n",
"step-5": "from nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.probability import FreqDist\nimport csv\n\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nsentences=[]\nremarks=[]\npsObject = PorterStemmer()\n\nillegal_chars = [\n '.',',','@',\"'\",'+','-','*',\n]\nparagraph=''\n\nfor kk in file :\n paragraph+=kk[0]\nf.close()\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nall_words = word_tokenize(paragraph)\n# print(all_words)\nall2 = FreqDist(all_words)\nmost_common_words = list(all2.most_common(100))\nprint('most commons below...')\nprint(most_common_words)\nmost_cm_1=[]\nfor i,j in most_common_words:\n most_cm_1.append(i)\n# print(most_cm_1)\nstopWords = stopwords.words('english')\nall_words = []\nfor i in file :\n filtered=''\n filtered_from_stopWords=''\n counter = 0\n for j in range(len(illegal_chars)) :\n if counter == 0:\n counter+=1\n filtered = i[0].replace(illegal_chars[j], '')\n else :\n filtered=filtered.replace(illegal_chars[j],'')\n counter=0\n filteredArr = filtered.split(' ')\n for x in filteredArr :\n if x not in stopWords :\n filtered_from_stopWords+=x+' '\n bb=[]\n filtered_from_stopWords_ARRAY=filtered_from_stopWords.split(' ')\n features = {w.lower(): (w in most_cm_1) for w in filtered_from_stopWords_ARRAY}\n bb.append(features)\n bb.append(i[1])\n sentences.append(bb)\n remarks.append(i[1])\n\ncount =0\nprint(remarks)\nprint(sentences)\nclassifier = NaiveBayesClassifier.train(sentences)\ninputs = input('Enter a comment ')\nwords_entered=inputs.split(' ')\nentry = {w: ( True) for w in words_entered}\n\nprint(classifier.classify(entry))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
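A minimal, self-contained sketch of the {word: bool} feature-dict pattern the entry above feeds to NaiveBayesClassifier.train; the toy comments and labels here are invented for illustration only.

from nltk.classify import NaiveBayesClassifier

# Each training item pairs a boolean word-presence dict with a label,
# mirroring the (features, remark) pairs built in the entry above.
train_data = [
    ({'you': True, 'are': True, 'dumb': True}, 'troll'),
    ({'nice': True, 'work': True, 'thanks': True}, 'not troll'),
    ({'what': True, 'an': True, 'idiot': True}, 'troll'),
    ({'great': True, 'explanation': True}, 'not troll'),
]
clf = NaiveBayesClassifier.train(train_data)

# Classify a new comment with the same featurization as the entry above.
comment = 'you idiot'
print(clf.classify({w: True for w in comment.split(' ')}))  # expected: 'troll'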
<|reserved_special_token_0|>
class QManeger(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
<|reserved_special_token_0|>
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
def produce_batch(self):
batch_size = self.opt.batch_size
res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:
batch_size], self.traces_r[:batch_size]
del self.traces_s[:batch_size]
del self.traces_a[:batch_size]
del self.traces_r[:batch_size]
res_s = torch.FloatTensor(res_s).to(self.device)
res_a = torch.LongTensor(res_a).to(self.device)
res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)
self.q_batch.put((res_s, res_a, res_r))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
def _push_one(self, state, action, reward):
self.traces_s.append(state)
self.traces_a.append(action)
self.traces_r.append(reward)
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
def produce_batch(self):
batch_size = self.opt.batch_size
res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:
batch_size], self.traces_r[:batch_size]
del self.traces_s[:batch_size]
del self.traces_a[:batch_size]
del self.traces_r[:batch_size]
res_s = torch.FloatTensor(res_s).to(self.device)
res_a = torch.LongTensor(res_a).to(self.device)
res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)
self.q_batch.put((res_s, res_a, res_r))
<|reserved_special_token_1|>
import torch
import torch.multiprocessing as mp
import random
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
def _push_one(self, state, action, reward):
self.traces_s.append(state)
self.traces_a.append(action)
self.traces_r.append(reward)
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
def produce_batch(self):
batch_size = self.opt.batch_size
res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:
batch_size], self.traces_r[:batch_size]
del self.traces_s[:batch_size]
del self.traces_a[:batch_size]
del self.traces_r[:batch_size]
res_s = torch.FloatTensor(res_s).to(self.device)
res_a = torch.LongTensor(res_a).to(self.device)
res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)
self.q_batch.put((res_s, res_a, res_r))
<|reserved_special_token_1|>
import torch
import torch.multiprocessing as mp
import random
class QManeger(object):
def __init__(self, opt, q_trace, q_batch):
self.traces_s = []
self.traces_a = []
self.traces_r = []
self.lock = mp.Lock()
self.q_trace = q_trace
self.q_batch = q_batch
self.opt = opt
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def _push_one(self, state, action, reward):
self.traces_s.append(state)
self.traces_a.append(action)
self.traces_r.append(reward)
def listening(self):
while True:
traces = self.q_trace.get(block=True)
for s, a, r in zip(traces[0], traces[1], traces[2]):
self._push_one(s, a, r)
if len(self.traces_s) > self.opt.batch_size:
self.produce_batch()
def produce_batch(self):
batch_size = self.opt.batch_size
res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:batch_size], \
self.traces_r[:batch_size]
# delete
del self.traces_s[:batch_size]
del self.traces_a[:batch_size]
del self.traces_r[:batch_size]
res_s = torch.FloatTensor(res_s).to(self.device)
res_a = torch.LongTensor(res_a).to(self.device)
res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)
# stack batch and put
self.q_batch.put((res_s, res_a, res_r))
|
flexible
|
{
"blob_id": "b693cc63e2ee4c994ef7b5e44faea99f15a021f6",
"index": 68,
"step-1": "<mask token>\n\n\nclass QManeger(object):\n <mask token>\n <mask token>\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else\n 'cpu')\n <mask token>\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n",
"step-3": "<mask token>\n\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else\n 'cpu')\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n",
"step-4": "import torch\nimport torch.multiprocessing as mp\nimport random\n\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device('cuda' if torch.cuda.is_available() else\n 'cpu')\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:\n batch_size], self.traces_r[:batch_size]\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n self.q_batch.put((res_s, res_a, res_r))\n",
"step-5": "import torch\nimport torch.multiprocessing as mp\nimport random\n\nclass QManeger(object):\n\n def __init__(self, opt, q_trace, q_batch):\n self.traces_s = []\n self.traces_a = []\n self.traces_r = []\n self.lock = mp.Lock()\n\n self.q_trace = q_trace\n self.q_batch = q_batch\n self.opt = opt\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def _push_one(self, state, action, reward):\n self.traces_s.append(state)\n self.traces_a.append(action)\n self.traces_r.append(reward)\n\n def listening(self):\n while True:\n traces = self.q_trace.get(block=True)\n for s, a, r in zip(traces[0], traces[1], traces[2]):\n self._push_one(s, a, r)\n\n if len(self.traces_s) > self.opt.batch_size:\n self.produce_batch()\n\n def produce_batch(self):\n batch_size = self.opt.batch_size\n\n res_s, res_a, res_r = self.traces_s[:batch_size], self.traces_a[:batch_size], \\\n self.traces_r[:batch_size]\n\n # delete\n del self.traces_s[:batch_size]\n del self.traces_a[:batch_size]\n del self.traces_r[:batch_size]\n\n res_s = torch.FloatTensor(res_s).to(self.device)\n res_a = torch.LongTensor(res_a).to(self.device)\n res_r = torch.FloatTensor(res_r).to(self.device).view(-1, 1)\n\n # stack batch and put\n self.q_batch.put((res_s, res_a, res_r))\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
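A sketch of how the two queues above might be wired up in the parent process; the SimpleNamespace options and the toy trace contents are assumptions, and QManeger is taken to be in scope from the entry above.

import torch.multiprocessing as mp
from types import SimpleNamespace

if __name__ == '__main__':
    opt = SimpleNamespace(batch_size=4)  # assumed: only batch_size is needed here
    q_trace, q_batch = mp.Queue(), mp.Queue()
    maneger = QManeger(opt, q_trace, q_batch)

    # An actor process would normally put (states, actions, rewards) traces:
    states = [[0.0, 1.0], [1.0, 0.0], [0.5, 0.5], [0.2, 0.8], [0.9, 0.1]]
    q_trace.put((states, [0, 1, 0, 1, 0], [1.0, -1.0, 0.5, 0.0, 1.0]))

    # One iteration of listening(), inlined to avoid the infinite loop:
    traces = q_trace.get(block=True)
    for s, a, r in zip(traces[0], traces[1], traces[2]):
        maneger._push_one(s, a, r)
    if len(maneger.traces_s) > opt.batch_size:
        maneger.produce_batch()
    res_s, res_a, res_r = q_batch.get()
    print(res_s.shape, res_a.shape, res_r.shape)  # [4, 2], [4], [4, 1]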
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('src/time.txt', 'w') as f:
f.write(str(int(time.time())))
<|reserved_special_token_1|>
import time
with open('src/time.txt', 'w') as f:
f.write(str(int(time.time())))
<|reserved_special_token_1|>
import time
with open("src/time.txt", "w") as f:
f.write(str(int(time.time())))
|
flexible
|
{
"blob_id": "0058a6d3c9d4e600885b876614362ea4401ce2fe",
"index": 1640,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('src/time.txt', 'w') as f:\n f.write(str(int(time.time())))\n",
"step-3": "import time\nwith open('src/time.txt', 'w') as f:\n f.write(str(int(time.time())))\n",
"step-4": "import time\n\nwith open(\"src/time.txt\", \"w\") as f:\n f.write(str(int(time.time())))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
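A companion sketch for the entry above: reading the saved epoch back and reporting its age. The path matches the writer; the age check is illustrative.

import time

with open('src/time.txt') as f:
    saved = int(f.read().strip())
print(f'timestamp written {int(time.time()) - saved} seconds ago')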
import os
class Idea:
def __init__(self, folder):
self.folder = folder
def name(self):
return "jetbrains-idea"
def cmd(self):
return "intellij-idea-ultimate-edition %s" % self.folder
|
normal
|
{
"blob_id": "90fc6e37e3988a2014c66913db61749509db2d53",
"index": 1036,
"step-1": "<mask token>\n\n\nclass Idea:\n <mask token>\n <mask token>\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"step-2": "<mask token>\n\n\nclass Idea:\n\n def __init__(self, folder):\n self.folder = folder\n <mask token>\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"step-3": "<mask token>\n\n\nclass Idea:\n\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return 'jetbrains-idea'\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"step-4": "import os\n\n\nclass Idea:\n\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return 'jetbrains-idea'\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"step-5": "import os\n\nclass Idea:\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return \"jetbrains-idea\"\n\n def cmd(self):\n return \"intellij-idea-ultimate-edition %s\" % self.folder\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
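A sketch of how a launcher might consume the wrapper above; Idea is taken to be in scope from the entry, the project path is a placeholder, and the actual launch is left commented out because it would start an IDE.

import subprocess

idea = Idea('/home/user/projects/demo')
print(idea.name())  # jetbrains-idea
print(idea.cmd())   # intellij-idea-ultimate-edition /home/user/projects/demo
# shell=True because cmd() returns a single command string:
# subprocess.run(idea.cmd(), shell=True)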
<|reserved_special_token_0|>
class _CornerStorageBuilder:
def __init__(self, progress_indicator=None):
self._progress_indicator = progress_indicator
self._corners = dict()
def set_corners_at_frame(self, frame, corners):
self._corners[frame] = corners
if self._progress_indicator is not None:
self._progress_indicator.update(1)
def build_corner_storage(self):
return StorageImpl(item[1] for item in sorted(self._corners.items()))
<|reserved_special_token_0|>
class CornerTracker:
MAX_CORNERS = 1300
INITIAL_QUALITY_LEVEL = 0.03
QUALITY_LEVEL = 0.15
MIN_DISTANCE = 6
BLOCK_SIZE = 5
CIRCLE_SIZE = 14
MAX_LEVEL_LK = 2
TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03
def __init__(self):
self.total_corners = 0
def get_circles_mask(self, shape, points):
mask = np.full(shape, 255, dtype=np.uint8)
for x, y in points:
cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color
=0, thickness=-1)
return mask
def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,
quality_level=INITIAL_QUALITY_LEVEL):
points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=
num_corners, qualityLevel=quality_level, minDistance=self.
MIN_DISTANCE, blockSize=self.BLOCK_SIZE)
if points is None:
return None, None
num_points = points.shape[0]
sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])
return points, sizes
def get_corners(self, new_img, old_img=None, old_corners=None):
if old_img is None:
points, sizes = self.find_new_corners(new_img)
ids = np.arange(len(points))
points = points.reshape((-1, 2))
self.total_corners = len(points)
return FrameCorners(ids, points, sizes)
else:
ids = old_corners.ids
points = old_corners.points
sizes = old_corners.sizes
nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(
old_img), to_uint8_image(new_img), prevPts=points, nextPts=
None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),
maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)
status = status.squeeze()
found = np.where(status == 1)
ids = ids[found]
points = nextPts[found]
sizes = sizes[found]
mask = self.get_circles_mask(new_img.shape, points)
if len(points) < self.MAX_CORNERS:
new_points, new_sizes = self.find_new_corners(new_img, self
.MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)
if new_points is not None:
new_ids = np.arange(self.total_corners, self.
total_corners + len(new_points))
new_ids = new_ids.reshape((-1, 1))
new_points = new_points.reshape((-1, 2))
new_sizes = new_sizes.reshape((-1, 1))
self.total_corners += len(new_points)
ids = np.concatenate([ids, new_ids])
points = np.concatenate([points, new_points])
sizes = np.concatenate([sizes, new_sizes])
points = points.reshape((-1, 2))
return FrameCorners(ids, points, sizes)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _CornerStorageBuilder:
def __init__(self, progress_indicator=None):
self._progress_indicator = progress_indicator
self._corners = dict()
def set_corners_at_frame(self, frame, corners):
self._corners[frame] = corners
if self._progress_indicator is not None:
self._progress_indicator.update(1)
def build_corner_storage(self):
return StorageImpl(item[1] for item in sorted(self._corners.items()))
def to_uint8_image(img):
img = img * 255.0
img = np.round(img)
return img.astype(np.uint8)
class CornerTracker:
MAX_CORNERS = 1300
INITIAL_QUALITY_LEVEL = 0.03
QUALITY_LEVEL = 0.15
MIN_DISTANCE = 6
BLOCK_SIZE = 5
CIRCLE_SIZE = 14
MAX_LEVEL_LK = 2
TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03
def __init__(self):
self.total_corners = 0
def get_circles_mask(self, shape, points):
mask = np.full(shape, 255, dtype=np.uint8)
for x, y in points:
cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color
=0, thickness=-1)
return mask
def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,
quality_level=INITIAL_QUALITY_LEVEL):
points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=
num_corners, qualityLevel=quality_level, minDistance=self.
MIN_DISTANCE, blockSize=self.BLOCK_SIZE)
if points is None:
return None, None
num_points = points.shape[0]
sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])
return points, sizes
def get_corners(self, new_img, old_img=None, old_corners=None):
if old_img is None:
points, sizes = self.find_new_corners(new_img)
ids = np.arange(len(points))
points = points.reshape((-1, 2))
self.total_corners = len(points)
return FrameCorners(ids, points, sizes)
else:
ids = old_corners.ids
points = old_corners.points
sizes = old_corners.sizes
nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(
old_img), to_uint8_image(new_img), prevPts=points, nextPts=
None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),
maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)
status = status.squeeze()
found = np.where(status == 1)
ids = ids[found]
points = nextPts[found]
sizes = sizes[found]
mask = self.get_circles_mask(new_img.shape, points)
if len(points) < self.MAX_CORNERS:
new_points, new_sizes = self.find_new_corners(new_img, self
.MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)
if new_points is not None:
new_ids = np.arange(self.total_corners, self.
total_corners + len(new_points))
new_ids = new_ids.reshape((-1, 1))
new_points = new_points.reshape((-1, 2))
new_sizes = new_sizes.reshape((-1, 1))
self.total_corners += len(new_points)
ids = np.concatenate([ids, new_ids])
points = np.concatenate([points, new_points])
sizes = np.concatenate([sizes, new_sizes])
points = points.reshape((-1, 2))
return FrameCorners(ids, points, sizes)
<|reserved_special_token_0|>
def build(frame_sequence: pims.FramesSequence, progress: bool=True
) ->CornerStorage:
"""
Build corners for all frames of a frame sequence.
:param frame_sequence: grayscale float32 frame sequence.
:param progress: enable/disable building progress bar.
:return: corners for all frames of given sequence.
"""
if progress:
with click.progressbar(length=len(frame_sequence), label=
'Calculating corners') as progress_bar:
builder = _CornerStorageBuilder(progress_bar)
_build_impl(frame_sequence, builder)
else:
builder = _CornerStorageBuilder()
_build_impl(frame_sequence, builder)
corner_storage = builder.build_corner_storage()
final_storage = without_short_tracks(corner_storage, min_len=20)
return final_storage
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _CornerStorageBuilder:
def __init__(self, progress_indicator=None):
self._progress_indicator = progress_indicator
self._corners = dict()
def set_corners_at_frame(self, frame, corners):
self._corners[frame] = corners
if self._progress_indicator is not None:
self._progress_indicator.update(1)
def build_corner_storage(self):
return StorageImpl(item[1] for item in sorted(self._corners.items()))
def to_uint8_image(img):
img = img * 255.0
img = np.round(img)
return img.astype(np.uint8)
class CornerTracker:
MAX_CORNERS = 1300
INITIAL_QUALITY_LEVEL = 0.03
QUALITY_LEVEL = 0.15
MIN_DISTANCE = 6
BLOCK_SIZE = 5
CIRCLE_SIZE = 14
MAX_LEVEL_LK = 2
TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03
def __init__(self):
self.total_corners = 0
def get_circles_mask(self, shape, points):
mask = np.full(shape, 255, dtype=np.uint8)
for x, y in points:
cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color
=0, thickness=-1)
return mask
def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,
quality_level=INITIAL_QUALITY_LEVEL):
points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=
num_corners, qualityLevel=quality_level, minDistance=self.
MIN_DISTANCE, blockSize=self.BLOCK_SIZE)
if points is None:
return None, None
num_points = points.shape[0]
sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])
return points, sizes
def get_corners(self, new_img, old_img=None, old_corners=None):
if old_img is None:
points, sizes = self.find_new_corners(new_img)
ids = np.arange(len(points))
points = points.reshape((-1, 2))
self.total_corners = len(points)
return FrameCorners(ids, points, sizes)
else:
ids = old_corners.ids
points = old_corners.points
sizes = old_corners.sizes
nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(
old_img), to_uint8_image(new_img), prevPts=points, nextPts=
None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),
maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)
status = status.squeeze()
found = np.where(status == 1)
ids = ids[found]
points = nextPts[found]
sizes = sizes[found]
mask = self.get_circles_mask(new_img.shape, points)
if len(points) < self.MAX_CORNERS:
new_points, new_sizes = self.find_new_corners(new_img, self
.MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)
if new_points is not None:
new_ids = np.arange(self.total_corners, self.
total_corners + len(new_points))
new_ids = new_ids.reshape((-1, 1))
new_points = new_points.reshape((-1, 2))
new_sizes = new_sizes.reshape((-1, 1))
self.total_corners += len(new_points)
ids = np.concatenate([ids, new_ids])
points = np.concatenate([points, new_points])
sizes = np.concatenate([sizes, new_sizes])
points = points.reshape((-1, 2))
return FrameCorners(ids, points, sizes)
def _build_impl(frame_sequence: pims.FramesSequence, builder:
_CornerStorageBuilder) ->None:
cornerTracker = CornerTracker()
image_0 = frame_sequence[0]
corners = cornerTracker.get_corners(image_0)
builder.set_corners_at_frame(0, corners)
for frame, image_1 in enumerate(frame_sequence[1:], 1):
corners = cornerTracker.get_corners(image_1, image_0, corners)
builder.set_corners_at_frame(frame, corners)
image_0 = image_1
def build(frame_sequence: pims.FramesSequence, progress: bool=True
) ->CornerStorage:
"""
Build corners for all frames of a frame sequence.
:param frame_sequence: grayscale float32 frame sequence.
:param progress: enable/disable building progress bar.
:return: corners for all frames of given sequence.
"""
if progress:
with click.progressbar(length=len(frame_sequence), label=
'Calculating corners') as progress_bar:
builder = _CornerStorageBuilder(progress_bar)
_build_impl(frame_sequence, builder)
else:
builder = _CornerStorageBuilder()
_build_impl(frame_sequence, builder)
corner_storage = builder.build_corner_storage()
final_storage = without_short_tracks(corner_storage, min_len=20)
return final_storage
if __name__ == '__main__':
create_cli(build)()
<|reserved_special_token_1|>
__all__ = ['FrameCorners', 'CornerStorage', 'build', 'dump', 'load', 'draw',
'without_short_tracks']
<|reserved_special_token_0|>
class _CornerStorageBuilder:
def __init__(self, progress_indicator=None):
self._progress_indicator = progress_indicator
self._corners = dict()
def set_corners_at_frame(self, frame, corners):
self._corners[frame] = corners
if self._progress_indicator is not None:
self._progress_indicator.update(1)
def build_corner_storage(self):
return StorageImpl(item[1] for item in sorted(self._corners.items()))
def to_uint8_image(img):
img = img * 255.0
img = np.round(img)
return img.astype(np.uint8)
class CornerTracker:
MAX_CORNERS = 1300
INITIAL_QUALITY_LEVEL = 0.03
QUALITY_LEVEL = 0.15
MIN_DISTANCE = 6
BLOCK_SIZE = 5
CIRCLE_SIZE = 14
MAX_LEVEL_LK = 2
TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03
def __init__(self):
self.total_corners = 0
def get_circles_mask(self, shape, points):
mask = np.full(shape, 255, dtype=np.uint8)
for x, y in points:
cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color
=0, thickness=-1)
return mask
def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,
quality_level=INITIAL_QUALITY_LEVEL):
points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=
num_corners, qualityLevel=quality_level, minDistance=self.
MIN_DISTANCE, blockSize=self.BLOCK_SIZE)
if points is None:
return None, None
num_points = points.shape[0]
sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])
return points, sizes
def get_corners(self, new_img, old_img=None, old_corners=None):
if old_img is None:
points, sizes = self.find_new_corners(new_img)
ids = np.arange(len(points))
points = points.reshape((-1, 2))
self.total_corners = len(points)
return FrameCorners(ids, points, sizes)
else:
ids = old_corners.ids
points = old_corners.points
sizes = old_corners.sizes
nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(
old_img), to_uint8_image(new_img), prevPts=points, nextPts=
None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),
maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)
status = status.squeeze()
found = np.where(status == 1)
ids = ids[found]
points = nextPts[found]
sizes = sizes[found]
mask = self.get_circles_mask(new_img.shape, points)
if len(points) < self.MAX_CORNERS:
new_points, new_sizes = self.find_new_corners(new_img, self
.MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)
if new_points is not None:
new_ids = np.arange(self.total_corners, self.
total_corners + len(new_points))
new_ids = new_ids.reshape((-1, 1))
new_points = new_points.reshape((-1, 2))
new_sizes = new_sizes.reshape((-1, 1))
self.total_corners += len(new_points)
ids = np.concatenate([ids, new_ids])
points = np.concatenate([points, new_points])
sizes = np.concatenate([sizes, new_sizes])
points = points.reshape((-1, 2))
return FrameCorners(ids, points, sizes)
def _build_impl(frame_sequence: pims.FramesSequence, builder:
_CornerStorageBuilder) ->None:
cornerTracker = CornerTracker()
image_0 = frame_sequence[0]
corners = cornerTracker.get_corners(image_0)
builder.set_corners_at_frame(0, corners)
for frame, image_1 in enumerate(frame_sequence[1:], 1):
corners = cornerTracker.get_corners(image_1, image_0, corners)
builder.set_corners_at_frame(frame, corners)
image_0 = image_1
def build(frame_sequence: pims.FramesSequence, progress: bool=True
) ->CornerStorage:
"""
Build corners for all frames of a frame sequence.
:param frame_sequence: grayscale float32 frame sequence.
:param progress: enable/disable building progress bar.
:return: corners for all frames of given sequence.
"""
if progress:
with click.progressbar(length=len(frame_sequence), label=
'Calculating corners') as progress_bar:
builder = _CornerStorageBuilder(progress_bar)
_build_impl(frame_sequence, builder)
else:
builder = _CornerStorageBuilder()
_build_impl(frame_sequence, builder)
corner_storage = builder.build_corner_storage()
final_storage = without_short_tracks(corner_storage, min_len=20)
return final_storage
if __name__ == '__main__':
create_cli(build)()
<|reserved_special_token_1|>
#! /usr/bin/env python3
__all__ = [
'FrameCorners',
'CornerStorage',
'build',
'dump',
'load',
'draw',
'without_short_tracks'
]
import click
import cv2
import numpy as np
import pims
from _corners import FrameCorners, CornerStorage, StorageImpl
from _corners import dump, load, draw, without_short_tracks, create_cli
class _CornerStorageBuilder:
def __init__(self, progress_indicator=None):
self._progress_indicator = progress_indicator
self._corners = dict()
def set_corners_at_frame(self, frame, corners):
self._corners[frame] = corners
if self._progress_indicator is not None:
self._progress_indicator.update(1)
def build_corner_storage(self):
return StorageImpl(item[1] for item in sorted(self._corners.items()))
def to_uint8_image(img):
img = img * 255.0
img = np.round(img)
return img.astype(np.uint8)
class CornerTracker:
MAX_CORNERS = 1300
INITIAL_QUALITY_LEVEL = 0.03
QUALITY_LEVEL = 0.15
MIN_DISTANCE = 6
BLOCK_SIZE = 5
CIRCLE_SIZE = 14
MAX_LEVEL_LK = 2
TERM_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
def __init__(self):
self.total_corners = 0
def get_circles_mask(self, shape, points):
mask = np.full(shape, 255, dtype=np.uint8)
for x, y in points:
cv2.circle(mask,
center=(x, y),
radius=self.MIN_DISTANCE,
color=0,
thickness=-1)
return mask
def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None, quality_level=INITIAL_QUALITY_LEVEL):
points = cv2.goodFeaturesToTrack(img,
mask=mask,
maxCorners=num_corners,
qualityLevel=quality_level,
minDistance=self.MIN_DISTANCE,
blockSize=self.BLOCK_SIZE)
if points is None:
return None, None
num_points = points.shape[0]
sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])
return points, sizes
def get_corners(self, new_img, old_img = None, old_corners=None):
if old_img is None:
points, sizes = self.find_new_corners(new_img)
ids = np.arange(len(points))
points = points.reshape((-1, 2))
self.total_corners = len(points)
return FrameCorners(ids, points, sizes)
else:
ids = old_corners.ids
points = old_corners.points
sizes = old_corners.sizes
nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(old_img),
to_uint8_image(new_img),
prevPts=points,
nextPts=None,
winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),
maxLevel=self.MAX_LEVEL_LK,
criteria=self.TERM_CRITERIA)
status = status.squeeze()
found = np.where(status == 1)
ids = ids[found]
points = nextPts[found]
sizes = sizes[found]
mask = self.get_circles_mask(new_img.shape, points)
if len(points) < self.MAX_CORNERS:
new_points, new_sizes = self.find_new_corners(new_img,
self.MAX_CORNERS - len(points),
mask,
self.QUALITY_LEVEL)
if new_points is not None:
new_ids = np.arange(self.total_corners, self.total_corners + len(new_points))
new_ids = new_ids.reshape((-1, 1))
new_points = new_points.reshape((-1, 2))
new_sizes = new_sizes.reshape((-1, 1))
self.total_corners += len(new_points)
ids = np.concatenate([ids, new_ids])
points = np.concatenate([points, new_points])
sizes = np.concatenate([sizes, new_sizes])
points = points.reshape((-1, 2))
return FrameCorners(ids, points, sizes)
def _build_impl(frame_sequence: pims.FramesSequence,
builder: _CornerStorageBuilder) -> None:
cornerTracker = CornerTracker()
image_0 = frame_sequence[0]
corners = cornerTracker.get_corners(image_0)
builder.set_corners_at_frame(0, corners)
for frame, image_1 in enumerate(frame_sequence[1:], 1):
corners = cornerTracker.get_corners(image_1, image_0, corners)
builder.set_corners_at_frame(frame, corners)
image_0 = image_1
def build(frame_sequence: pims.FramesSequence,
progress: bool = True) -> CornerStorage:
"""
Build corners for all frames of a frame sequence.
:param frame_sequence: grayscale float32 frame sequence.
:param progress: enable/disable building progress bar.
:return: corners for all frames of given sequence.
"""
if progress:
with click.progressbar(length=len(frame_sequence),
label='Calculating corners') as progress_bar:
builder = _CornerStorageBuilder(progress_bar)
_build_impl(frame_sequence, builder)
else:
builder = _CornerStorageBuilder()
_build_impl(frame_sequence, builder)
corner_storage = builder.build_corner_storage()
final_storage = without_short_tracks(corner_storage, min_len=20)
return final_storage
if __name__ == '__main__':
create_cli(build)() # pylint:disable=no-value-for-parameter
|
flexible
|
{
"blob_id": "0b5fb649dc421187820677ce75f3cd0e804c18a3",
"index": 7055,
"step-1": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\n<mask token>\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\n<mask token>\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n 
_build_impl(frame_sequence, builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence, builder:\n _CornerStorageBuilder) ->None:\n cornerTracker = CornerTracker()\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, corners)\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param 
frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)()\n",
"step-4": "__all__ = ['FrameCorners', 'CornerStorage', 'build', 'dump', 'load', 'draw',\n 'without_short_tracks']\n<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence, builder:\n _CornerStorageBuilder) ->None:\n cornerTracker = CornerTracker()\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, corners)\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: 
bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)()\n",
"step-5": "#! /usr/bin/env python3\n\n__all__ = [\n 'FrameCorners',\n 'CornerStorage',\n 'build',\n 'dump',\n 'load',\n 'draw',\n 'without_short_tracks'\n]\n\nimport click\nimport cv2\nimport numpy as np\nimport pims\n\nfrom _corners import FrameCorners, CornerStorage, StorageImpl\nfrom _corners import dump, load, draw, without_short_tracks, create_cli\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask,\n center=(x, y),\n radius=self.MIN_DISTANCE,\n color=0,\n thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None, quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img,\n mask=mask,\n maxCorners=num_corners,\n qualityLevel=quality_level,\n minDistance=self.MIN_DISTANCE,\n blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img = None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(old_img),\n to_uint8_image(new_img),\n prevPts=points,\n nextPts=None,\n winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK,\n criteria=self.TERM_CRITERIA)\n\n status = status.squeeze()\n found = np.where(status == 1)\n\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img,\n self.MAX_CORNERS - len(points),\n mask,\n self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence,\n builder: _CornerStorageBuilder) -> None:\n cornerTracker = CornerTracker()\n\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, 
corners)\n\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence,\n progress: bool = True) -> CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence),\n label='Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)() # pylint:disable=no-value-for-parameter",
"step-ids": [
10,
12,
14,
15,
17
]
}
|
[
10,
12,
14,
15,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
app.run_server(debug=False, port=8080, host='127.0.0.1')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = dash.Dash(__name__)
app.layout = html.H1('Hello dashboard')
if __name__ == '__main__':
app.run_server(debug=False, port=8080, host='127.0.0.1')
<|reserved_special_token_1|>
import dash
import dash_html_components as html
app = dash.Dash(__name__)
app.layout = html.H1('Hello dashboard')
if __name__ == '__main__':
app.run_server(debug=False, port=8080, host='127.0.0.1')
<|reserved_special_token_1|>
import dash
import dash_html_components as html
app = dash.Dash(__name__)
app.layout = html.H1("Hello dashboard")
if __name__ == "__main__":
app.run_server(debug=False, port=8080, host="127.0.0.1")
|
flexible
|
{
"blob_id": "b66f588149d160c119f9cc24af3acb9f64432d6e",
"index": 6014,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n app.run_server(debug=False, port=8080, host='127.0.0.1')\n",
"step-3": "<mask token>\napp = dash.Dash(__name__)\napp.layout = html.H1('Hello dashboard')\nif __name__ == '__main__':\n app.run_server(debug=False, port=8080, host='127.0.0.1')\n",
"step-4": "import dash\nimport dash_html_components as html\napp = dash.Dash(__name__)\napp.layout = html.H1('Hello dashboard')\nif __name__ == '__main__':\n app.run_server(debug=False, port=8080, host='127.0.0.1')\n",
"step-5": "import dash\nimport dash_html_components as html\n\napp = dash.Dash(__name__)\napp.layout = html.H1(\"Hello dashboard\")\n\nif __name__ == \"__main__\":\n app.run_server(debug=False, port=8080, host=\"127.0.0.1\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def ipfs_add_local(file_path):
"""Returns CID"""
proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True,
text=True)
stdout = proc.stdout
try:
return stdout.split()[1]
except IndexError as e:
print(e)
print(stdout)
return ''
def pin_with_pinata(cid, name):
proc = subprocess.run(['ipfs', 'pin', 'remote', 'add',
'--service=pinata', f'--name={name}', str(cid)], capture_output=
True, text=True)
print(f'Uploaded cid: {cid}')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_files(dir_path, ext='.png'):
relative_paths = os.listdir(dir_path)
relative_paths = list(filter(lambda fp: ext in fp, relative_paths))
return list(map(lambda rel_p: os.path.join(dir_path, rel_p),
relative_paths))
def ipfs_add_local(file_path):
"""Returns CID"""
proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True,
text=True)
stdout = proc.stdout
try:
return stdout.split()[1]
except IndexError as e:
print(e)
print(stdout)
return ''
def pin_with_pinata(cid, name):
proc = subprocess.run(['ipfs', 'pin', 'remote', 'add',
'--service=pinata', f'--name={name}', str(cid)], capture_output=
True, text=True)
print(f'Uploaded cid: {cid}')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_files(dir_path, ext='.png'):
relative_paths = os.listdir(dir_path)
relative_paths = list(filter(lambda fp: ext in fp, relative_paths))
return list(map(lambda rel_p: os.path.join(dir_path, rel_p),
relative_paths))
def ipfs_add_local(file_path):
"""Returns CID"""
proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True,
text=True)
stdout = proc.stdout
try:
return stdout.split()[1]
except IndexError as e:
print(e)
print(stdout)
return ''
def pin_with_pinata(cid, name):
proc = subprocess.run(['ipfs', 'pin', 'remote', 'add',
'--service=pinata', f'--name={name}', str(cid)], capture_output=
True, text=True)
print(f'Uploaded cid: {cid}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Batch IPFS file uploading')
parser.add_argument('-i', '--input', help=
'Path to directory containing media to upload', required=True)
args = vars(parser.parse_args())
files_to_upload = get_files(args['input'])
info = {}
for fp in files_to_upload:
print(fp)
cid = ipfs_add_local(fp)
if cid == '':
print(f'{fp} failed to upload!')
continue
name = os.path.basename(fp)
info[name] = {'cid': cid}
pin_with_pinata(cid, name)
with open(f"{args['input']}/result.csv", 'w') as f:
for fn in sorted(info.keys()):
cid = info[fn]['cid']
f.write(f'{fn}, {cid}\n')
f.close()
<|reserved_special_token_1|>
import argparse
import subprocess
import os
def get_files(dir_path, ext='.png'):
relative_paths = os.listdir(dir_path)
relative_paths = list(filter(lambda fp: ext in fp, relative_paths))
return list(map(lambda rel_p: os.path.join(dir_path, rel_p),
relative_paths))
def ipfs_add_local(file_path):
"""Returns CID"""
proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True,
text=True)
stdout = proc.stdout
try:
return stdout.split()[1]
except IndexError as e:
print(e)
print(stdout)
return ''
def pin_with_pinata(cid, name):
proc = subprocess.run(['ipfs', 'pin', 'remote', 'add',
'--service=pinata', f'--name={name}', str(cid)], capture_output=
True, text=True)
print(f'Uploaded cid: {cid}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Batch IPFS file uploading')
parser.add_argument('-i', '--input', help=
'Path to directory containing media to upload', required=True)
args = vars(parser.parse_args())
files_to_upload = get_files(args['input'])
info = {}
for fp in files_to_upload:
print(fp)
cid = ipfs_add_local(fp)
if cid == '':
print(f'{fp} failed to upload!')
continue
name = os.path.basename(fp)
info[name] = {'cid': cid}
pin_with_pinata(cid, name)
with open(f"{args['input']}/result.csv", 'w') as f:
for fn in sorted(info.keys()):
cid = info[fn]['cid']
f.write(f'{fn}, {cid}\n')
f.close()
<|reserved_special_token_1|>
import argparse
import subprocess
import os
def get_files(dir_path, ext='.png'):
relative_paths = os.listdir(dir_path)
relative_paths = list(filter(lambda fp: ext in fp, relative_paths))
return list(map(lambda rel_p: os.path.join(dir_path, rel_p), relative_paths))
def ipfs_add_local(file_path):
'Returns CID'
proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True, text=True)
stdout = proc.stdout
try:
return stdout.split()[1]
except IndexError as e:
print(e)
print(stdout)
return ""
def pin_with_pinata(cid, name):
proc = subprocess.run(['ipfs', 'pin', 'remote', 'add', '--service=pinata', f'--name={name}', str(cid)], capture_output=True, text=True)
print(f'Uploaded cid: {cid}')
# print(proc.stdout)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Batch IPFS file uploading')
parser.add_argument('-i', '--input', help='Path to directory containing media to upload', required=True)
args = vars(parser.parse_args())
files_to_upload = get_files(args['input'])
info = {}
for fp in files_to_upload:
print(fp)
cid = ipfs_add_local(fp)
if cid == "":
print(f'{fp} failed to upload!')
continue
name = os.path.basename(fp)
info[name] = {'cid': cid}
pin_with_pinata(cid, name)
with open(f'{args["input"]}/result.csv', 'w') as f:
for fn in sorted(info.keys()):
cid = info[fn]['cid']
f.write(f'{fn}, {cid}\n')
f.close()
|
flexible
|
{
"blob_id": "7ca88d451ad702e5a8e532da3e3f5939cfaa7215",
"index": 9571,
"step-1": "<mask token>\n\n\ndef ipfs_add_local(file_path):\n \"\"\"Returns CID\"\"\"\n proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True,\n text=True)\n stdout = proc.stdout\n try:\n return stdout.split()[1]\n except IndexError as e:\n print(e)\n print(stdout)\n return ''\n\n\ndef pin_with_pinata(cid, name):\n proc = subprocess.run(['ipfs', 'pin', 'remote', 'add',\n '--service=pinata', f'--name={name}', str(cid)], capture_output=\n True, text=True)\n print(f'Uploaded cid: {cid}')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_files(dir_path, ext='.png'):\n relative_paths = os.listdir(dir_path)\n relative_paths = list(filter(lambda fp: ext in fp, relative_paths))\n return list(map(lambda rel_p: os.path.join(dir_path, rel_p),\n relative_paths))\n\n\ndef ipfs_add_local(file_path):\n \"\"\"Returns CID\"\"\"\n proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True,\n text=True)\n stdout = proc.stdout\n try:\n return stdout.split()[1]\n except IndexError as e:\n print(e)\n print(stdout)\n return ''\n\n\ndef pin_with_pinata(cid, name):\n proc = subprocess.run(['ipfs', 'pin', 'remote', 'add',\n '--service=pinata', f'--name={name}', str(cid)], capture_output=\n True, text=True)\n print(f'Uploaded cid: {cid}')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_files(dir_path, ext='.png'):\n relative_paths = os.listdir(dir_path)\n relative_paths = list(filter(lambda fp: ext in fp, relative_paths))\n return list(map(lambda rel_p: os.path.join(dir_path, rel_p),\n relative_paths))\n\n\ndef ipfs_add_local(file_path):\n \"\"\"Returns CID\"\"\"\n proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True,\n text=True)\n stdout = proc.stdout\n try:\n return stdout.split()[1]\n except IndexError as e:\n print(e)\n print(stdout)\n return ''\n\n\ndef pin_with_pinata(cid, name):\n proc = subprocess.run(['ipfs', 'pin', 'remote', 'add',\n '--service=pinata', f'--name={name}', str(cid)], capture_output=\n True, text=True)\n print(f'Uploaded cid: {cid}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Batch IPFS file uploading')\n parser.add_argument('-i', '--input', help=\n 'Path to directory containing media to upload', required=True)\n args = vars(parser.parse_args())\n files_to_upload = get_files(args['input'])\n info = {}\n for fp in files_to_upload:\n print(fp)\n cid = ipfs_add_local(fp)\n if cid == '':\n print(f'{fp} failed to upload!')\n continue\n name = os.path.basename(fp)\n info[name] = {'cid': cid}\n pin_with_pinata(cid, name)\n with open(f\"{args['input']}/result.csv\", 'w') as f:\n for fn in sorted(info.keys()):\n cid = info[fn]['cid']\n f.write(f'{fn}, {cid}\\n')\n f.close()\n",
"step-4": "import argparse\nimport subprocess\nimport os\n\n\ndef get_files(dir_path, ext='.png'):\n relative_paths = os.listdir(dir_path)\n relative_paths = list(filter(lambda fp: ext in fp, relative_paths))\n return list(map(lambda rel_p: os.path.join(dir_path, rel_p),\n relative_paths))\n\n\ndef ipfs_add_local(file_path):\n \"\"\"Returns CID\"\"\"\n proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True,\n text=True)\n stdout = proc.stdout\n try:\n return stdout.split()[1]\n except IndexError as e:\n print(e)\n print(stdout)\n return ''\n\n\ndef pin_with_pinata(cid, name):\n proc = subprocess.run(['ipfs', 'pin', 'remote', 'add',\n '--service=pinata', f'--name={name}', str(cid)], capture_output=\n True, text=True)\n print(f'Uploaded cid: {cid}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Batch IPFS file uploading')\n parser.add_argument('-i', '--input', help=\n 'Path to directory containing media to upload', required=True)\n args = vars(parser.parse_args())\n files_to_upload = get_files(args['input'])\n info = {}\n for fp in files_to_upload:\n print(fp)\n cid = ipfs_add_local(fp)\n if cid == '':\n print(f'{fp} failed to upload!')\n continue\n name = os.path.basename(fp)\n info[name] = {'cid': cid}\n pin_with_pinata(cid, name)\n with open(f\"{args['input']}/result.csv\", 'w') as f:\n for fn in sorted(info.keys()):\n cid = info[fn]['cid']\n f.write(f'{fn}, {cid}\\n')\n f.close()\n",
"step-5": "import argparse\nimport subprocess\nimport os\n\n\ndef get_files(dir_path, ext='.png'):\n relative_paths = os.listdir(dir_path)\n relative_paths = list(filter(lambda fp: ext in fp, relative_paths))\n return list(map(lambda rel_p: os.path.join(dir_path, rel_p), relative_paths))\n\n\ndef ipfs_add_local(file_path):\n 'Returns CID'\n proc = subprocess.run(['ipfs', 'add', file_path], capture_output=True, text=True)\n stdout = proc.stdout\n try:\n return stdout.split()[1]\n except IndexError as e:\n print(e)\n print(stdout)\n return \"\"\n\n\ndef pin_with_pinata(cid, name):\n proc = subprocess.run(['ipfs', 'pin', 'remote', 'add', '--service=pinata', f'--name={name}', str(cid)], capture_output=True, text=True)\n print(f'Uploaded cid: {cid}')\n # print(proc.stdout)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Batch IPFS file uploading')\n parser.add_argument('-i', '--input', help='Path to directory containing media to upload', required=True)\n args = vars(parser.parse_args())\n\n files_to_upload = get_files(args['input'])\n\n info = {}\n\n for fp in files_to_upload:\n print(fp)\n cid = ipfs_add_local(fp)\n if cid == \"\":\n print(f'{fp} failed to upload!')\n continue\n name = os.path.basename(fp)\n info[name] = {'cid': cid}\n\n pin_with_pinata(cid, name)\n\n with open(f'{args[\"input\"]}/result.csv', 'w') as f:\n for fn in sorted(info.keys()):\n cid = info[fn]['cid']\n f.write(f'{fn}, {cid}\\n')\n\n f.close()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def get_heart_rate(auth2_client, date, granularity='1sec'):
"""
Query intraday time series given date
granularity: 1sec or 1min
"""
heart_rate_raw = auth2_client.intraday_time_series('activities/heart',
base_date=date, detail_level=granularity)
time_list = []
val_list = []
date_list = []
for i in heart_rate_raw['activities-heart-intraday']['dataset']:
val_list.append(i['value'])
time_list.append(i['time'])
date_list.append(date)
heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,
'Time': time_list})
heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +
heart_rate_df['Time'])
heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]
return heart_rate_df
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
server.browser_authorize()
<|reserved_special_token_0|>
def get_heart_rate(auth2_client, date, granularity='1sec'):
"""
Query intraday time series given date
granularity: 1sec or 1min
"""
heart_rate_raw = auth2_client.intraday_time_series('activities/heart',
base_date=date, detail_level=granularity)
time_list = []
val_list = []
date_list = []
for i in heart_rate_raw['activities-heart-intraday']['dataset']:
val_list.append(i['value'])
time_list.append(i['time'])
date_list.append(date)
heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,
'Time': time_list})
heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +
heart_rate_df['Time'])
heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]
return heart_rate_df
<|reserved_special_token_0|>
for date in DATES:
heart_rate_dfs.append(get_heart_rate(auth2_client, date))
<|reserved_special_token_0|>
for i in range(len(DATE_RANGES)):
start = pd.to_datetime(DATE_RANGES['Start'][i])
end = pd.to_datetime(DATE_RANGES['End'][i])
mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.
to_datetime(heart_rate_df['Timestamp']) <= end)
heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)
<|reserved_special_token_0|>
heart_rate_df.to_csv(FILEPATH, index=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
auth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True,
access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
def get_heart_rate(auth2_client, date, granularity='1sec'):
"""
Query intraday time series given date
granularity: 1sec or 1min
"""
heart_rate_raw = auth2_client.intraday_time_series('activities/heart',
base_date=date, detail_level=granularity)
time_list = []
val_list = []
date_list = []
for i in heart_rate_raw['activities-heart-intraday']['dataset']:
val_list.append(i['value'])
time_list.append(i['time'])
date_list.append(date)
heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,
'Time': time_list})
heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +
heart_rate_df['Time'])
heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]
return heart_rate_df
START_DATE = '2018-01-20'
END_DATE = '2018-02-13'
DATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()
DATES = [date.strftime('%Y-%m-%d') for date in DATES]
heart_rate_dfs = []
for date in DATES:
heart_rate_dfs.append(get_heart_rate(auth2_client, date))
heart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)
DATE_RANGES = pd.read_csv('./data/date_times.csv')
DATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])
DATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])
heart_rate_df['onDate?'] = 0
for i in range(len(DATE_RANGES)):
start = pd.to_datetime(DATE_RANGES['Start'][i])
end = pd.to_datetime(DATE_RANGES['End'][i])
mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.
to_datetime(heart_rate_df['Timestamp']) <= end)
heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)
FILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'
heart_rate_df.to_csv(FILEPATH, index=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import fitbit
import gather_keys_oauth2 as Oauth2
import pandas as pd
import datetime as dt
from config import CLIENT_ID, CLIENT_SECRET
server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
auth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True,
access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
def get_heart_rate(auth2_client, date, granularity='1sec'):
"""
Query intraday time series given date
granularity: 1sec or 1min
"""
heart_rate_raw = auth2_client.intraday_time_series('activities/heart',
base_date=date, detail_level=granularity)
time_list = []
val_list = []
date_list = []
for i in heart_rate_raw['activities-heart-intraday']['dataset']:
val_list.append(i['value'])
time_list.append(i['time'])
date_list.append(date)
heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,
'Time': time_list})
heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +
heart_rate_df['Time'])
heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]
return heart_rate_df
START_DATE = '2018-01-20'
END_DATE = '2018-02-13'
DATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()
DATES = [date.strftime('%Y-%m-%d') for date in DATES]
heart_rate_dfs = []
for date in DATES:
heart_rate_dfs.append(get_heart_rate(auth2_client, date))
heart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)
DATE_RANGES = pd.read_csv('./data/date_times.csv')
DATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])
DATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])
heart_rate_df['onDate?'] = 0
for i in range(len(DATE_RANGES)):
start = pd.to_datetime(DATE_RANGES['Start'][i])
end = pd.to_datetime(DATE_RANGES['End'][i])
mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.
to_datetime(heart_rate_df['Timestamp']) <= end)
heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)
FILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'
heart_rate_df.to_csv(FILEPATH, index=False)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 20:44:38 2018
@author: user
"""
import fitbit
import gather_keys_oauth2 as Oauth2
import pandas as pd
import datetime as dt
from config import CLIENT_ID, CLIENT_SECRET
#Establish connection to Fitbit API
server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
auth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
def get_heart_rate(auth2_client, date, granularity='1sec'):
"""
Query intraday time series given date
granularity: 1sec or 1min
"""
heart_rate_raw = auth2_client.intraday_time_series('activities/heart', base_date=date, detail_level=granularity)
time_list = []
val_list = []
date_list = []
for i in heart_rate_raw['activities-heart-intraday']['dataset']:
val_list.append(i['value'])
time_list.append(i['time'])
date_list.append(date)
heart_rate_df = pd.DataFrame({'Date': date_list,'Heart Rate':val_list,'Time':time_list})
heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' + heart_rate_df['Time'])
heart_rate_df = heart_rate_df[['Timestamp','Heart Rate']]
return heart_rate_df
START_DATE = '2018-01-20'
END_DATE = '2018-02-13'
DATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()
DATES = [date.strftime('%Y-%m-%d') for date in DATES]
heart_rate_dfs = []
for date in DATES:
heart_rate_dfs.append(get_heart_rate(auth2_client, date))
#Concatenate individual heart_rate_dfs for each date into one big df
heart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)
#Label each reading as 0 (not on date) or 1 (on date)
DATE_RANGES = pd.read_csv('./data/date_times.csv')
DATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])
DATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])
heart_rate_df['onDate?'] = 0
for i in range(len(DATE_RANGES)):
start = pd.to_datetime(DATE_RANGES['Start'][i])
end = pd.to_datetime(DATE_RANGES['End'][i])
mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.to_datetime(heart_rate_df['Timestamp']) <= end)
heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)
#Save to CSV
FILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'
heart_rate_df.to_csv(FILEPATH, index=False)
|
flexible
|
{
"blob_id": "9f1cbc655a5d8f14fa45cf977bb2dcee4874b188",
"index": 5809,
"step-1": "<mask token>\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n heart_rate_raw = auth2_client.intraday_time_series('activities/heart',\n base_date=date, detail_level=granularity)\n time_list = []\n val_list = []\n date_list = []\n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,\n 'Time': time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +\n heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]\n return heart_rate_df\n\n\n<mask token>\n",
"step-2": "<mask token>\nserver.browser_authorize()\n<mask token>\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n heart_rate_raw = auth2_client.intraday_time_series('activities/heart',\n base_date=date, detail_level=granularity)\n time_list = []\n val_list = []\n date_list = []\n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,\n 'Time': time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +\n heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]\n return heart_rate_df\n\n\n<mask token>\nfor date in DATES:\n heart_rate_dfs.append(get_heart_rate(auth2_client, date))\n<mask token>\nfor i in range(len(DATE_RANGES)):\n start = pd.to_datetime(DATE_RANGES['Start'][i])\n end = pd.to_datetime(DATE_RANGES['End'][i])\n mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.\n to_datetime(heart_rate_df['Timestamp']) <= end)\n heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)\n<mask token>\nheart_rate_df.to_csv(FILEPATH, index=False)\n",
"step-3": "<mask token>\nserver = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)\nserver.browser_authorize()\nACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])\nREFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])\nauth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True,\n access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n heart_rate_raw = auth2_client.intraday_time_series('activities/heart',\n base_date=date, detail_level=granularity)\n time_list = []\n val_list = []\n date_list = []\n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,\n 'Time': time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +\n heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]\n return heart_rate_df\n\n\nSTART_DATE = '2018-01-20'\nEND_DATE = '2018-02-13'\nDATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()\nDATES = [date.strftime('%Y-%m-%d') for date in DATES]\nheart_rate_dfs = []\nfor date in DATES:\n heart_rate_dfs.append(get_heart_rate(auth2_client, date))\nheart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)\nDATE_RANGES = pd.read_csv('./data/date_times.csv')\nDATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])\nDATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])\nheart_rate_df['onDate?'] = 0\nfor i in range(len(DATE_RANGES)):\n start = pd.to_datetime(DATE_RANGES['Start'][i])\n end = pd.to_datetime(DATE_RANGES['End'][i])\n mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.\n to_datetime(heart_rate_df['Timestamp']) <= end)\n heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)\nFILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'\nheart_rate_df.to_csv(FILEPATH, index=False)\n",
"step-4": "<mask token>\nimport fitbit\nimport gather_keys_oauth2 as Oauth2\nimport pandas as pd\nimport datetime as dt\nfrom config import CLIENT_ID, CLIENT_SECRET\nserver = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)\nserver.browser_authorize()\nACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])\nREFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])\nauth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True,\n access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n heart_rate_raw = auth2_client.intraday_time_series('activities/heart',\n base_date=date, detail_level=granularity)\n time_list = []\n val_list = []\n date_list = []\n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n heart_rate_df = pd.DataFrame({'Date': date_list, 'Heart Rate': val_list,\n 'Time': time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' +\n heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp', 'Heart Rate']]\n return heart_rate_df\n\n\nSTART_DATE = '2018-01-20'\nEND_DATE = '2018-02-13'\nDATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()\nDATES = [date.strftime('%Y-%m-%d') for date in DATES]\nheart_rate_dfs = []\nfor date in DATES:\n heart_rate_dfs.append(get_heart_rate(auth2_client, date))\nheart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)\nDATE_RANGES = pd.read_csv('./data/date_times.csv')\nDATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])\nDATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])\nheart_rate_df['onDate?'] = 0\nfor i in range(len(DATE_RANGES)):\n start = pd.to_datetime(DATE_RANGES['Start'][i])\n end = pd.to_datetime(DATE_RANGES['End'][i])\n mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.\n to_datetime(heart_rate_df['Timestamp']) <= end)\n heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)\nFILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'\nheart_rate_df.to_csv(FILEPATH, index=False)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 23 20:44:38 2018\n\n@author: user\n\"\"\"\n\nimport fitbit\nimport gather_keys_oauth2 as Oauth2\nimport pandas as pd \nimport datetime as dt\nfrom config import CLIENT_ID, CLIENT_SECRET\n\n\n#Establish connection to Fitbit API\nserver = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)\nserver.browser_authorize()\n\nACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])\nREFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])\n\nauth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)\n\n\ndef get_heart_rate(auth2_client, date, granularity='1sec'):\n \"\"\"\n Query intraday time series given date\n granularity: 1sec or 1min\n \"\"\"\n \n heart_rate_raw = auth2_client.intraday_time_series('activities/heart', base_date=date, detail_level=granularity)\n\n time_list = []\n val_list = []\n date_list = []\n \n for i in heart_rate_raw['activities-heart-intraday']['dataset']:\n val_list.append(i['value'])\n time_list.append(i['time'])\n date_list.append(date)\n \n heart_rate_df = pd.DataFrame({'Date': date_list,'Heart Rate':val_list,'Time':time_list})\n heart_rate_df['Timestamp'] = pd.to_datetime(heart_rate_df['Date'] + ' ' + heart_rate_df['Time'])\n heart_rate_df = heart_rate_df[['Timestamp','Heart Rate']]\n \n return heart_rate_df\n\n\nSTART_DATE = '2018-01-20'\nEND_DATE = '2018-02-13' \nDATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()\nDATES = [date.strftime('%Y-%m-%d') for date in DATES]\n \nheart_rate_dfs = []\nfor date in DATES:\n heart_rate_dfs.append(get_heart_rate(auth2_client, date))\n\n#Concatenate individual heart_rate_dfs for each date into one big df\nheart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)\n\n#Label each reading as 0 (not on date) or 1 (on date)\nDATE_RANGES = pd.read_csv('./data/date_times.csv')\nDATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])\nDATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])\n\nheart_rate_df['onDate?'] = 0\nfor i in range(len(DATE_RANGES)):\n start = pd.to_datetime(DATE_RANGES['Start'][i])\n end = pd.to_datetime(DATE_RANGES['End'][i])\n \n mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.to_datetime(heart_rate_df['Timestamp']) <= end)\n heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)\n\n#Save to CSV\nFILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'\nheart_rate_df.to_csv(FILEPATH, index=False)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
regressor.fit(X, y)
<|reserved_special_token_0|>
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.scatter(6.5, y_pred, color='green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, dataset.shape[1] - 1].values
<|reserved_special_token_0|>
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, y)
y_pred = regressor.predict(np.reshape([6.5], (-1, 1)))
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.scatter(6.5, y_pred, color='green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show()
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, dataset.shape[1] - 1].values
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, y)
y_pred = regressor.predict(np.reshape([6.5], (-1, 1)))
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.scatter(6.5, y_pred, color='green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show()
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, dataset.shape[1]-1].values
#Fitting the Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X, y)
#Predicting a new result
y_pred = regressor.predict(np.reshape([6.5], (-1, 1)))
#Visualizing the results
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.scatter(6.5, y_pred, color = 'green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show()
|
flexible
|
{
"blob_id": "c8565e1b5659dd0908aabf91e07738a798dc3232",
"index": 1366,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nregressor.fit(X, y)\n<mask token>\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n",
"step-3": "<mask token>\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1] - 1].values\n<mask token>\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(X, y)\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1] - 1].values\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(X, y)\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1]-1].values\n\n#Fitting the Decision Tree Regression\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X, y)\n\n#Predicting a new result\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\n\n#Visualizing the results\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.scatter(6.5, y_pred, color = 'green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""This module contains an algorithm to find the different
components in a graph represented as an adjacency matrix.
"""
def find_components(adjacency_matrix):
visited = set()
components = []
for node in range(len(adjacency_matrix)):
if node not in visited:
component = []
build_component(adjacency_matrix, visited, node, component)
components.append(component)
return components
def build_component(adjacency_matrix, visited, node, component):
visited.add(node)
component.append(node)
for neighbor, value in enumerate(adjacency_matrix[node]):
if value == 1 and neighbor not in visited:
build_component(adjacency_matrix, visited, neighbor, component)
|
normal
|
{
"blob_id": "e71a23ef7a065bc4210e55552e19c83c428bc194",
"index": 3187,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_components(adjacency_matrix):\n visited = set()\n components = []\n for node in range(len(adjacency_matrix)):\n if node not in visited:\n component = []\n build_component(adjacency_matrix, visited, node, component)\n components.append(component)\n return components\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_components(adjacency_matrix):\n visited = set()\n components = []\n for node in range(len(adjacency_matrix)):\n if node not in visited:\n component = []\n build_component(adjacency_matrix, visited, node, component)\n components.append(component)\n return components\n\n\ndef build_component(adjacency_matrix, visited, node, component):\n visited.add(node)\n component.append(node)\n for neighbor, value in enumerate(adjacency_matrix[node]):\n if value == 1 and neighbor not in visited:\n build_component(adjacency_matrix, visited, neighbor, component)\n",
"step-4": "\"\"\"This module contains an algorithm to find the different\ncomponents in a graph represented as an adjacency matrix.\n\"\"\"\n\n\ndef find_components(adjacency_matrix):\n visited = set()\n components = []\n for node in range(len(adjacency_matrix)):\n if node not in visited:\n component = []\n build_component(adjacency_matrix, visited, node, component)\n components.append(component)\n return components\n\n\ndef build_component(adjacency_matrix, visited, node, component):\n visited.add(node)\n component.append(node)\n for neighbor, value in enumerate(adjacency_matrix[node]):\n if value == 1 and neighbor not in visited:\n build_component(adjacency_matrix, visited, neighbor, component)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from typing import List
"""
1. Generate an array containing the products of all elements to the left of current element
2. Similarly, start from the last element and generate an array containing the products to the right of each element
3. Multiply both arrays element-wise
"""
class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
output = []
prod = 1
# First generate the products to the left of the current element
for num in nums:
output.append(prod)
prod *= num
prod = 1
# Now, generate and multiply the product to the right of current element
for k in range(len(nums) - 1, -1, -1):
output[k] = output[k] * prod
prod *= nums[k]
return output
|
normal
|
{
"blob_id": "26ae44b5be1d78ed3fe9c858413ae47e163c5460",
"index": 1282,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def productExceptSelf(self, nums: List[int]) ->List[int]:\n output = []\n prod = 1\n for num in nums:\n output.append(prod)\n prod *= num\n prod = 1\n for k in range(len(nums) - 1, -1, -1):\n output[k] = output[k] * prod\n prod *= nums[k]\n return output\n",
"step-4": "from typing import List\n<mask token>\n\n\nclass Solution:\n\n def productExceptSelf(self, nums: List[int]) ->List[int]:\n output = []\n prod = 1\n for num in nums:\n output.append(prod)\n prod *= num\n prod = 1\n for k in range(len(nums) - 1, -1, -1):\n output[k] = output[k] * prod\n prod *= nums[k]\n return output\n",
"step-5": "from typing import List\n\n\"\"\"\n1. Generate an array containing the products of all elements to the left of current element\n2. Similarly, start from the last element and generate an array containing the products to the right of each element\n3. Multiply both arrays element-wise\n\n\"\"\"\n\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n output = []\n prod = 1\n # First generate the products to the left of the current element\n for num in nums:\n output.append(prod)\n prod *= num\n\n prod = 1\n # Now, generate and multiply the product to the right of current element\n for k in range(len(nums) - 1, -1, -1):\n output[k] = output[k] * prod\n prod *= nums[k]\n\n return output\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df.head()
<|reserved_special_token_0|>
print(m)
<|reserved_special_token_0|>
print(mean)
for i in mean:
random.seed(1)
randomFactor = [(random.random() * 0.01 + (i - 0.005)) for _ in range(m)]
for idx, step in enumerate(range(m)):
frameBegin = time.time()
time.sleep(randomFactor[idx])
computeBegin = time.time()
dt = i if idx == 0 else deltaTime
A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0,
0.0], [0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0
], [0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0], [0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, dt, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 0.0, dt], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
G = np.matrix([[1 / 2.0 * dt ** 2], [1 / 2.0 * dt ** 2], [1 / 2.0 *
dt ** 2], [dt], [dt], [dt], [1.0], [1.0], [1.0]])
Q = G * G.T * sa ** 2
x = A * x + B * u
P = A * P * A.T + Q
S = H * P * H.T + R
K = P * H.T * np.linalg.pinv(S)
Z = measurements[:, step].reshape(H.shape[0], 1)
y = Z - H * x
x = x + K * y
P = (I - K * H) * P
xt.append(float(x[0]))
yt.append(float(x[1]))
zt.append(float(x[2]))
frameEnd = time.time()
deltaTime = frameEnd - frameBegin
totalTime += frameEnd - computeBegin
dist = np.sqrt(((Xr - xt) ** 2 + (Yr - yt) ** 2 + (Zr - zt) ** 2).mean())
print('%.3f,%.8f,%.3f' % (i, totalTime, dist))
totalTime = 0.0
P = 1.0 * np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0]])
rp = 0.01
R = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],
[0.0]])
I = np.eye(9)
sp = 0.01
Xm = Xr + sp * np.random.randn(m)
Ym = Yr + sp * np.random.randn(m)
Zm = Zr + sp * np.random.randn(m)
measurements = np.vstack((Xm, Ym, Zm))
x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][
0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
xt = []
yt = []
zt = []
<|reserved_special_token_1|>
<|reserved_special_token_0|>
csvfilename = 'data/0901/exp1/xiaoxiong.csv'
df = pd.read_csv(csvfilename, header=None, names=['abstime', 'posx', 'posy',
'posz', 'roty', 'rotx', 'anim'])
df.head()
Xr = df['posx'].values
Yr = df['posy'].values
Zr = df['posz'].values
m = len(Xr)
print(m)
deltaTime = 0.0
totalTime = 0.0
P = 1.0 * np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0]])
rp = 0.01
R = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]])
I = np.eye(9)
sp = 0.01
Xm = Xr + sp * np.random.randn(m)
Ym = Yr + sp * np.random.randn(m)
Zm = Zr + sp * np.random.randn(m)
measurements = np.vstack((Xm, Ym, Zm))
x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0],
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
xt = []
yt = []
zt = []
mean = [(i * 0.01) for i in range(1, 21)]
print(mean)
for i in mean:
random.seed(1)
randomFactor = [(random.random() * 0.01 + (i - 0.005)) for _ in range(m)]
for idx, step in enumerate(range(m)):
frameBegin = time.time()
time.sleep(randomFactor[idx])
computeBegin = time.time()
dt = i if idx == 0 else deltaTime
A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0,
0.0], [0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0
], [0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0], [0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, dt, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 0.0, dt], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
G = np.matrix([[1 / 2.0 * dt ** 2], [1 / 2.0 * dt ** 2], [1 / 2.0 *
dt ** 2], [dt], [dt], [dt], [1.0], [1.0], [1.0]])
Q = G * G.T * sa ** 2
x = A * x + B * u
P = A * P * A.T + Q
S = H * P * H.T + R
K = P * H.T * np.linalg.pinv(S)
Z = measurements[:, step].reshape(H.shape[0], 1)
y = Z - H * x
x = x + K * y
P = (I - K * H) * P
xt.append(float(x[0]))
yt.append(float(x[1]))
zt.append(float(x[2]))
frameEnd = time.time()
deltaTime = frameEnd - frameBegin
totalTime += frameEnd - computeBegin
dist = np.sqrt(((Xr - xt) ** 2 + (Yr - yt) ** 2 + (Zr - zt) ** 2).mean())
print('%.3f,%.8f,%.3f' % (i, totalTime, dist))
totalTime = 0.0
P = 1.0 * np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0]])
rp = 0.01
R = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],
[0.0]])
I = np.eye(9)
sp = 0.01
Xm = Xr + sp * np.random.randn(m)
Ym = Yr + sp * np.random.randn(m)
Zm = Zr + sp * np.random.randn(m)
measurements = np.vstack((Xm, Ym, Zm))
x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][
0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
xt = []
yt = []
zt = []
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
import random
csvfilename = 'data/0901/exp1/xiaoxiong.csv'
df = pd.read_csv(csvfilename, header=None, names=['abstime', 'posx', 'posy',
'posz', 'roty', 'rotx', 'anim'])
df.head()
Xr = df['posx'].values
Yr = df['posy'].values
Zr = df['posz'].values
m = len(Xr)
print(m)
deltaTime = 0.0
totalTime = 0.0
P = 1.0 * np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0]])
rp = 0.01
R = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]])
I = np.eye(9)
sp = 0.01
Xm = Xr + sp * np.random.randn(m)
Ym = Yr + sp * np.random.randn(m)
Zm = Zr + sp * np.random.randn(m)
measurements = np.vstack((Xm, Ym, Zm))
x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0],
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
xt = []
yt = []
zt = []
mean = [(i * 0.01) for i in range(1, 21)]
print(mean)
for i in mean:
random.seed(1)
randomFactor = [(random.random() * 0.01 + (i - 0.005)) for _ in range(m)]
for idx, step in enumerate(range(m)):
frameBegin = time.time()
time.sleep(randomFactor[idx])
computeBegin = time.time()
dt = i if idx == 0 else deltaTime
A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0,
0.0], [0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0
], [0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0], [0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, dt, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0,
0.0, 0.0, dt], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
G = np.matrix([[1 / 2.0 * dt ** 2], [1 / 2.0 * dt ** 2], [1 / 2.0 *
dt ** 2], [dt], [dt], [dt], [1.0], [1.0], [1.0]])
Q = G * G.T * sa ** 2
x = A * x + B * u
P = A * P * A.T + Q
S = H * P * H.T + R
K = P * H.T * np.linalg.pinv(S)
Z = measurements[:, step].reshape(H.shape[0], 1)
y = Z - H * x
x = x + K * y
P = (I - K * H) * P
xt.append(float(x[0]))
yt.append(float(x[1]))
zt.append(float(x[2]))
frameEnd = time.time()
deltaTime = frameEnd - frameBegin
totalTime += frameEnd - computeBegin
dist = np.sqrt(((Xr - xt) ** 2 + (Yr - yt) ** 2 + (Zr - zt) ** 2).mean())
print('%.3f,%.8f,%.3f' % (i, totalTime, dist))
totalTime = 0.0
P = 1.0 * np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0]])
rp = 0.01
R = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],
[0.0]])
I = np.eye(9)
sp = 0.01
Xm = Xr + sp * np.random.randn(m)
Ym = Yr + sp * np.random.randn(m)
Zm = Zr + sp * np.random.randn(m)
measurements = np.vstack((Xm, Ym, Zm))
x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][
0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
xt = []
yt = []
zt = []
<|reserved_special_token_1|>
# coding: utf-8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
import random
csvfilename = 'data/0901/exp1/xiaoxiong.csv'
df = pd.read_csv(csvfilename, header=None,
names=['abstime','posx','posy','posz','roty','rotx','anim'])
# skiprows=1, skipfooter=1)
df.head()
Xr=df['posx'].values
Yr=df['posy'].values
Zr=df['posz'].values
m=len(Xr)
print(m)
deltaTime = 0.0
totalTime = 0.0
P = 1.0*np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
rp = 0.01 # Noise of Position Measurement
R = np.matrix([[rp, 0.0, 0.0],
[0.0, rp, 0.0],
[0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0]])
I = np.eye(9)
sp= 0.01 # Sigma for position noise
Xm = Xr + sp * (np.random.randn(m))
Ym = Yr + sp * (np.random.randn(m))
Zm = Zr + sp * (np.random.randn(m))
measurements = np.vstack((Xm,Ym,Zm))
x = np.matrix([measurements[0][0], measurements[1][0],measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
# Preallocation for Plotting
xt = []
yt = []
zt = []
mean = [i*0.01 for i in range(1,21)]
print(mean)
for i in mean:
random.seed(1)
randomFactor = [random.random() * 0.01 + (i - 0.005) for _ in range(m)]
for idx,step in enumerate(range(m)):
frameBegin = time.time()
time.sleep(randomFactor[idx])
computeBegin = time.time()
        # Update the time-varying matrices
dt = i if idx == 0 else deltaTime # Time Step between Filter Steps
A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
G = np.matrix([[1 / 2.0 * dt ** 2],
[1 / 2.0 * dt ** 2],
[1 / 2.0 * dt ** 2],
[dt],
[dt],
[dt],
[1.0],
[1.0],
[1.0]])
Q = G * G.T * sa ** 2
# Time Update (Prediction)
# ========================
# Project the state ahead
x = A*x + B*u
# Project the error covariance ahead
P = A*P*A.T + Q
# Measurement Update (Correction)
# ===============================
# Compute the Kalman Gain
S = H*P*H.T + R
K = (P*H.T) * np.linalg.pinv(S)
# Update the estimate via z
Z = measurements[:,step].reshape(H.shape[0],1)
y = Z - (H*x) # Innovation or Residual
x = x + (K*y)
# Update the error covariance
P = (I - (K*H))*P
# Save states for Plotting
xt.append(float(x[0]))
yt.append(float(x[1]))
zt.append(float(x[2]))
frameEnd = time.time()
deltaTime = frameEnd - frameBegin
totalTime += (frameEnd - computeBegin)
# distance calculate
dist = np.sqrt(((Xr-xt)**2 + (Yr-yt)**2 + (Zr-zt)**2).mean())
print('%.3f,%.8f,%.3f' % (i, totalTime, dist))
    # Reset to the initial settings
totalTime = 0.0
P = 1.0 * np.eye(9)
H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
rp = 0.01 # Noise of Position Measurement
R = np.matrix([[rp, 0.0, 0.0],
[0.0, rp, 0.0],
[0.0, 0.0, rp]])
sa = 0.05
u = 0.0
B = np.matrix([[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0]])
I = np.eye(9)
sp = 0.01 # Sigma for position noise
Xm = Xr + sp * (np.random.randn(m))
Ym = Yr + sp * (np.random.randn(m))
Zm = Zr + sp * (np.random.randn(m))
measurements = np.vstack((Xm, Ym, Zm))
x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T
# Preallocation for Plotting
xt = []
yt = []
zt = []
|
flexible
|
{
"blob_id": "d0adbcd60727c2c68e06dc5e796f2676f927c45a",
"index": 4593,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndf.head()\n<mask token>\nprint(m)\n<mask token>\nprint(mean)\nfor i in mean:\n random.seed(1)\n randomFactor = [(random.random() * 0.01 + (i - 0.005)) for _ in range(m)]\n for idx, step in enumerate(range(m)):\n frameBegin = time.time()\n time.sleep(randomFactor[idx])\n computeBegin = time.time()\n dt = i if idx == 0 else deltaTime\n A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0,\n 0.0], [0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0\n ], [0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],\n [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 1.0, 0.0, 0.0, dt, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, \n 0.0, 0.0, dt], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n G = np.matrix([[1 / 2.0 * dt ** 2], [1 / 2.0 * dt ** 2], [1 / 2.0 *\n dt ** 2], [dt], [dt], [dt], [1.0], [1.0], [1.0]])\n Q = G * G.T * sa ** 2\n x = A * x + B * u\n P = A * P * A.T + Q\n S = H * P * H.T + R\n K = P * H.T * np.linalg.pinv(S)\n Z = measurements[:, step].reshape(H.shape[0], 1)\n y = Z - H * x\n x = x + K * y\n P = (I - K * H) * P\n xt.append(float(x[0]))\n yt.append(float(x[1]))\n zt.append(float(x[2]))\n frameEnd = time.time()\n deltaTime = frameEnd - frameBegin\n totalTime += frameEnd - computeBegin\n dist = np.sqrt(((Xr - xt) ** 2 + (Yr - yt) ** 2 + (Zr - zt) ** 2).mean())\n print('%.3f,%.8f,%.3f' % (i, totalTime, dist))\n totalTime = 0.0\n P = 1.0 * np.eye(9)\n H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0]])\n rp = 0.01\n R = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])\n sa = 0.05\n u = 0.0\n B = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],\n [0.0]])\n I = np.eye(9)\n sp = 0.01\n Xm = Xr + sp * np.random.randn(m)\n Ym = Yr + sp * np.random.randn(m)\n Zm = Zr + sp * np.random.randn(m)\n measurements = np.vstack((Xm, Ym, Zm))\n x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][\n 0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\n xt = []\n yt = []\n zt = []\n",
"step-3": "<mask token>\ncsvfilename = 'data/0901/exp1/xiaoxiong.csv'\ndf = pd.read_csv(csvfilename, header=None, names=['abstime', 'posx', 'posy',\n 'posz', 'roty', 'rotx', 'anim'])\ndf.head()\nXr = df['posx'].values\nYr = df['posy'].values\nZr = df['posz'].values\nm = len(Xr)\nprint(m)\ndeltaTime = 0.0\ntotalTime = 0.0\nP = 1.0 * np.eye(9)\nH = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0]])\nrp = 0.01\nR = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])\nsa = 0.05\nu = 0.0\nB = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]])\nI = np.eye(9)\nsp = 0.01\nXm = Xr + sp * np.random.randn(m)\nYm = Yr + sp * np.random.randn(m)\nZm = Zr + sp * np.random.randn(m)\nmeasurements = np.vstack((Xm, Ym, Zm))\nx = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0], \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\nxt = []\nyt = []\nzt = []\nmean = [(i * 0.01) for i in range(1, 21)]\nprint(mean)\nfor i in mean:\n random.seed(1)\n randomFactor = [(random.random() * 0.01 + (i - 0.005)) for _ in range(m)]\n for idx, step in enumerate(range(m)):\n frameBegin = time.time()\n time.sleep(randomFactor[idx])\n computeBegin = time.time()\n dt = i if idx == 0 else deltaTime\n A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0,\n 0.0], [0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0\n ], [0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],\n [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 1.0, 0.0, 0.0, dt, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, \n 0.0, 0.0, dt], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n G = np.matrix([[1 / 2.0 * dt ** 2], [1 / 2.0 * dt ** 2], [1 / 2.0 *\n dt ** 2], [dt], [dt], [dt], [1.0], [1.0], [1.0]])\n Q = G * G.T * sa ** 2\n x = A * x + B * u\n P = A * P * A.T + Q\n S = H * P * H.T + R\n K = P * H.T * np.linalg.pinv(S)\n Z = measurements[:, step].reshape(H.shape[0], 1)\n y = Z - H * x\n x = x + K * y\n P = (I - K * H) * P\n xt.append(float(x[0]))\n yt.append(float(x[1]))\n zt.append(float(x[2]))\n frameEnd = time.time()\n deltaTime = frameEnd - frameBegin\n totalTime += frameEnd - computeBegin\n dist = np.sqrt(((Xr - xt) ** 2 + (Yr - yt) ** 2 + (Zr - zt) ** 2).mean())\n print('%.3f,%.8f,%.3f' % (i, totalTime, dist))\n totalTime = 0.0\n P = 1.0 * np.eye(9)\n H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0]])\n rp = 0.01\n R = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])\n sa = 0.05\n u = 0.0\n B = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],\n [0.0]])\n I = np.eye(9)\n sp = 0.01\n Xm = Xr + sp * np.random.randn(m)\n Ym = Yr + sp * np.random.randn(m)\n Zm = Zr + sp * np.random.randn(m)\n measurements = np.vstack((Xm, Ym, Zm))\n x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][\n 0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\n xt = []\n yt = []\n zt = []\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport time\nimport random\ncsvfilename = 'data/0901/exp1/xiaoxiong.csv'\ndf = pd.read_csv(csvfilename, header=None, names=['abstime', 'posx', 'posy',\n 'posz', 'roty', 'rotx', 'anim'])\ndf.head()\nXr = df['posx'].values\nYr = df['posy'].values\nZr = df['posz'].values\nm = len(Xr)\nprint(m)\ndeltaTime = 0.0\ntotalTime = 0.0\nP = 1.0 * np.eye(9)\nH = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0]])\nrp = 0.01\nR = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])\nsa = 0.05\nu = 0.0\nB = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]])\nI = np.eye(9)\nsp = 0.01\nXm = Xr + sp * np.random.randn(m)\nYm = Yr + sp * np.random.randn(m)\nZm = Zr + sp * np.random.randn(m)\nmeasurements = np.vstack((Xm, Ym, Zm))\nx = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0], \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\nxt = []\nyt = []\nzt = []\nmean = [(i * 0.01) for i in range(1, 21)]\nprint(mean)\nfor i in mean:\n random.seed(1)\n randomFactor = [(random.random() * 0.01 + (i - 0.005)) for _ in range(m)]\n for idx, step in enumerate(range(m)):\n frameBegin = time.time()\n time.sleep(randomFactor[idx])\n computeBegin = time.time()\n dt = i if idx == 0 else deltaTime\n A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0,\n 0.0], [0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0\n ], [0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],\n [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 1.0, 0.0, 0.0, dt, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, \n 0.0, 0.0, dt], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n G = np.matrix([[1 / 2.0 * dt ** 2], [1 / 2.0 * dt ** 2], [1 / 2.0 *\n dt ** 2], [dt], [dt], [dt], [1.0], [1.0], [1.0]])\n Q = G * G.T * sa ** 2\n x = A * x + B * u\n P = A * P * A.T + Q\n S = H * P * H.T + R\n K = P * H.T * np.linalg.pinv(S)\n Z = measurements[:, step].reshape(H.shape[0], 1)\n y = Z - H * x\n x = x + K * y\n P = (I - K * H) * P\n xt.append(float(x[0]))\n yt.append(float(x[1]))\n zt.append(float(x[2]))\n frameEnd = time.time()\n deltaTime = frameEnd - frameBegin\n totalTime += frameEnd - computeBegin\n dist = np.sqrt(((Xr - xt) ** 2 + (Yr - yt) ** 2 + (Zr - zt) ** 2).mean())\n print('%.3f,%.8f,%.3f' % (i, totalTime, dist))\n totalTime = 0.0\n P = 1.0 * np.eye(9)\n H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0]])\n rp = 0.01\n R = np.matrix([[rp, 0.0, 0.0], [0.0, rp, 0.0], [0.0, 0.0, rp]])\n sa = 0.05\n u = 0.0\n B = np.matrix([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],\n [0.0]])\n I = np.eye(9)\n sp = 0.01\n Xm = Xr + sp * np.random.randn(m)\n Ym = Yr + sp * np.random.randn(m)\n Zm = Zr + sp * np.random.randn(m)\n measurements = np.vstack((Xm, Ym, Zm))\n x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][\n 0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\n xt = []\n yt = []\n zt = []\n",
"step-5": "\n# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport time\nimport random\n\n\ncsvfilename = 'data/0901/exp1/xiaoxiong.csv'\ndf = pd.read_csv(csvfilename, header=None,\n names=['abstime','posx','posy','posz','roty','rotx','anim'])\n # skiprows=1, skipfooter=1)\ndf.head()\n\nXr=df['posx'].values\nYr=df['posy'].values\nZr=df['posz'].values\nm=len(Xr)\nprint(m)\n\ndeltaTime = 0.0\ntotalTime = 0.0\n\nP = 1.0*np.eye(9)\nH = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\nrp = 0.01 # Noise of Position Measurement\nR = np.matrix([[rp, 0.0, 0.0],\n [0.0, rp, 0.0],\n [0.0, 0.0, rp]])\nsa = 0.05\nu = 0.0\nB = np.matrix([[0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0]])\nI = np.eye(9)\nsp= 0.01 # Sigma for position noise\nXm = Xr + sp * (np.random.randn(m))\nYm = Yr + sp * (np.random.randn(m))\nZm = Zr + sp * (np.random.randn(m))\nmeasurements = np.vstack((Xm,Ym,Zm))\nx = np.matrix([measurements[0][0], measurements[1][0],measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\n# Preallocation for Plotting\nxt = []\nyt = []\nzt = []\n\nmean = [i*0.01 for i in range(1,21)]\nprint(mean)\nfor i in mean:\n random.seed(1)\n randomFactor = [random.random() * 0.01 + (i - 0.005) for _ in range(m)]\n for idx,step in enumerate(range(m)):\n frameBegin = time.time()\n time.sleep(randomFactor[idx])\n computeBegin = time.time()\n # 更新随时间变化的矩阵\n dt = i if idx == 0 else deltaTime # Time Step between Filter Steps\n A = np.matrix([[1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2, 0.0],\n [0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 1 / 2.0 * dt ** 2],\n [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n\n G = np.matrix([[1 / 2.0 * dt ** 2],\n [1 / 2.0 * dt ** 2],\n [1 / 2.0 * dt ** 2],\n [dt],\n [dt],\n [dt],\n [1.0],\n [1.0],\n [1.0]])\n Q = G * G.T * sa ** 2\n\n # Time Update (Prediction)\n # ========================\n # Project the state ahead\n x = A*x + B*u\n\n # Project the error covariance ahead\n P = A*P*A.T + Q\n\n # Measurement Update (Correction)\n # ===============================\n # Compute the Kalman Gain\n S = H*P*H.T + R\n K = (P*H.T) * np.linalg.pinv(S)\n\n # Update the estimate via z\n Z = measurements[:,step].reshape(H.shape[0],1)\n y = Z - (H*x) # Innovation or Residual\n x = x + (K*y)\n\n # Update the error covariance\n P = (I - (K*H))*P\n\n # Save states for Plotting\n xt.append(float(x[0]))\n yt.append(float(x[1]))\n zt.append(float(x[2]))\n\n frameEnd = time.time()\n deltaTime = frameEnd - frameBegin\n totalTime += (frameEnd - computeBegin)\n\n # distance calculate\n dist = np.sqrt(((Xr-xt)**2 + (Yr-yt)**2 + (Zr-zt)**2).mean())\n print('%.3f,%.8f,%.3f' % (i, totalTime, dist))\n # 还原初始设置\n totalTime = 0.0\n\n P = 1.0 * np.eye(9)\n H = np.matrix([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n rp = 0.01 # Noise of Position Measurement\n R = np.matrix([[rp, 0.0, 0.0],\n [0.0, rp, 0.0],\n [0.0, 0.0, rp]])\n sa = 0.05\n u = 0.0\n B = np.matrix([[0.0],\n [0.0],\n [0.0],\n 
[0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0],\n [0.0]])\n I = np.eye(9)\n sp = 0.01 # Sigma for position noise\n Xm = Xr + sp * (np.random.randn(m))\n Ym = Yr + sp * (np.random.randn(m))\n Zm = Zr + sp * (np.random.randn(m))\n measurements = np.vstack((Xm, Ym, Zm))\n x = np.matrix([measurements[0][0], measurements[1][0], measurements[2][0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).T\n # Preallocation for Plotting\n xt = []\n yt = []\n zt = []\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def load_cifar_data(data_files):
data = []
labels = []
for file in data_files:
with open(file, 'rb') as fo:
data_dict = pickle.load(fo, encoding='bytes')
if len(data) == 0:
data = data_dict[str.encode('data')]
labels = data_dict[str.encode('labels')]
else:
data = np.vstack((data, data_dict[str.encode('data')]))
labels.extend(data_dict[str.encode('labels')])
return data, labels
<|reserved_special_token_0|>
def get_data_from_indices(data, indices_dict, count_per_class, image_shape):
generated_data = []
generated_labels = []
for key, val in indices_dict.items():
if count_per_class:
for i in range(count_per_class):
generated_data.append(np.reshape(data[val[i]], image_shape))
generated_labels.append(key)
else:
for i in val:
generated_data.append(np.reshape(data[i], image_shape))
generated_labels.append(key)
return np.asarray(generated_data), np.reshape(np.asarray(
generated_labels, dtype=np.int32), (-1, 1))
def create_data_loader(data_x, data_y, batch_size, shuffle):
tensor_x = torch.stack([torch.Tensor(i) for i in data_x])
tensor_y = torch.stack([torch.Tensor(i) for i in data_y])
dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=
batch_size, shuffle=shuffle)
return dataloader
def train_model(model, train_data_loader, test_data_loader, num_epochs=5,
learning_rate=0.001, save_epochs=None, model_name='cnn'):
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_data_loader)
train_times = []
train_accuracies = []
train_losses = []
test_accuracies = []
for epoch in range(num_epochs):
start_time = time.time()
for i, (images, labels) in enumerate(train_data_loader):
outputs = model(images)
target = torch.max(labels.long(), 1)[0]
loss = criterion(outputs, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 200 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
epoch + 1, num_epochs, i + 1, total_step, loss.item()))
end_time = time.time()
if save_epochs and epoch + 1 in save_epochs:
torch.save(model, '../data/models/' + model_name + '_' + str(
epoch + 1))
train_times.append(end_time - start_time)
train_losses.append(loss.item())
print('Calculating train accuracy...')
train_accuracies.append(get_accuracies(train_data_loader, model)[0])
print('Calculating test accuracy...')
test_accuracies.append(get_accuracies(test_data_loader, model)[0])
print('Average training time per epoch:', np.mean(train_times))
print('Total training time for all epochs:', np.sum(train_times))
return train_accuracies, test_accuracies, train_losses
def get_accuracies(data_loader, model):
start_time = time.time()
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in data_loader:
labels = torch.max(labels.long(), 1)[0]
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
end_time = time.time()
time_taken = end_time - start_time
print('Accuracy of the model: {} %'.format(accuracy))
return accuracy, time_taken
def get_model_size(model, model_name):
    model = pickle.dumps(model)
byte_size = sys.getsizeof(model)
print('Size of ' + model_name + ' model: ', byte_size / 1000000)
def imshow(img, label_names, file_name='../data/sample_images'):
npimg = img.numpy()
npimg = npimg.astype(np.uint8)
npimg = np.transpose(npimg, (1, 2, 0))
plt.clf()
im = plt.imshow(npimg)
ylim = im.get_extent()[2]
plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)
plt.savefig(file_name)
plt.show()
def show_classwise_images(data, labels, label_names, k):
image_dict = {}
for idx, l in enumerate(labels):
label = l[0]
if label in image_dict.keys() and len(image_dict[label]) < k:
image_dict[label].append(data[idx])
elif label not in image_dict.keys():
image_dict[label] = [data[idx]]
images_to_show = []
labels_to_show = []
for label, image in image_dict.items():
labels_to_show.append(label_names[label])
for i in image:
images_to_show.append(i)
images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])
imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_cifar_data(data_files):
data = []
labels = []
for file in data_files:
with open(file, 'rb') as fo:
data_dict = pickle.load(fo, encoding='bytes')
if len(data) == 0:
data = data_dict[str.encode('data')]
labels = data_dict[str.encode('labels')]
else:
data = np.vstack((data, data_dict[str.encode('data')]))
labels.extend(data_dict[str.encode('labels')])
return data, labels
def unpickle(file):
with open(file, 'rb') as fo:
res = pickle.load(fo, encoding='bytes')
return res
<|reserved_special_token_0|>
def get_data_from_indices(data, indices_dict, count_per_class, image_shape):
generated_data = []
generated_labels = []
for key, val in indices_dict.items():
if count_per_class:
for i in range(count_per_class):
generated_data.append(np.reshape(data[val[i]], image_shape))
generated_labels.append(key)
else:
for i in val:
generated_data.append(np.reshape(data[i], image_shape))
generated_labels.append(key)
return np.asarray(generated_data), np.reshape(np.asarray(
generated_labels, dtype=np.int32), (-1, 1))
def create_data_loader(data_x, data_y, batch_size, shuffle):
tensor_x = torch.stack([torch.Tensor(i) for i in data_x])
tensor_y = torch.stack([torch.Tensor(i) for i in data_y])
dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=
batch_size, shuffle=shuffle)
return dataloader
def train_model(model, train_data_loader, test_data_loader, num_epochs=5,
learning_rate=0.001, save_epochs=None, model_name='cnn'):
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_data_loader)
train_times = []
train_accuracies = []
train_losses = []
test_accuracies = []
for epoch in range(num_epochs):
start_time = time.time()
for i, (images, labels) in enumerate(train_data_loader):
outputs = model(images)
target = torch.max(labels.long(), 1)[0]
loss = criterion(outputs, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 200 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
epoch + 1, num_epochs, i + 1, total_step, loss.item()))
end_time = time.time()
if save_epochs and epoch + 1 in save_epochs:
torch.save(model, '../data/models/' + model_name + '_' + str(
epoch + 1))
train_times.append(end_time - start_time)
train_losses.append(loss.item())
print('Calculating train accuracy...')
train_accuracies.append(get_accuracies(train_data_loader, model)[0])
print('Calculating test accuracy...')
test_accuracies.append(get_accuracies(test_data_loader, model)[0])
print('Average training time per epoch:', np.mean(train_times))
print('Total training time for all epochs:', np.sum(train_times))
return train_accuracies, test_accuracies, train_losses
def get_accuracies(data_loader, model):
start_time = time.time()
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in data_loader:
labels = torch.max(labels.long(), 1)[0]
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
end_time = time.time()
time_taken = end_time - start_time
print('Accuracy of the model: {} %'.format(accuracy))
return accuracy, time_taken
def get_model_size(model, model_name):
    model = pickle.dumps(model)
byte_size = sys.getsizeof(model)
print('Size of ' + model_name + ' model: ', byte_size / 1000000)
def imshow(img, label_names, file_name='../data/sample_images'):
npimg = img.numpy()
npimg = npimg.astype(np.uint8)
npimg = np.transpose(npimg, (1, 2, 0))
plt.clf()
im = plt.imshow(npimg)
ylim = im.get_extent()[2]
plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)
plt.savefig(file_name)
plt.show()
def show_classwise_images(data, labels, label_names, k):
image_dict = {}
for idx, l in enumerate(labels):
label = l[0]
if label in image_dict.keys() and len(image_dict[label]) < k:
image_dict[label].append(data[idx])
elif label not in image_dict.keys():
image_dict[label] = [data[idx]]
images_to_show = []
labels_to_show = []
for label, image in image_dict.items():
labels_to_show.append(label_names[label])
for i in image:
images_to_show.append(i)
images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])
imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)
def outlier_analysis(model, outliers_tensor, outlier_label_names,
cifar10_label_names):
model.eval()
predicted_labels = []
with torch.no_grad():
start_time = time.time()
outputs = model(outliers_tensor)
end_time = time.time()
print('Time taken for prediction:', str(end_time - start_time))
_, predicted = torch.max(outputs.data, 1)
for idx, label in enumerate(predicted):
print('Original:', outlier_label_names[idx], 'Predicted:',
cifar10_label_names[label])
predicted_labels.append(cifar10_label_names[label])
imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),
predicted_labels)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_cifar_data(data_files):
data = []
labels = []
for file in data_files:
with open(file, 'rb') as fo:
data_dict = pickle.load(fo, encoding='bytes')
if len(data) == 0:
data = data_dict[str.encode('data')]
labels = data_dict[str.encode('labels')]
else:
data = np.vstack((data, data_dict[str.encode('data')]))
labels.extend(data_dict[str.encode('labels')])
return data, labels
def unpickle(file):
with open(file, 'rb') as fo:
res = pickle.load(fo, encoding='bytes')
return res
def get_classwise_indices(labels):
label_indices = {}
for idx, label in enumerate(labels):
if label not in label_indices.keys():
label_indices[label] = [idx]
else:
label_indices[label].append(idx)
return label_indices
def get_data_from_indices(data, indices_dict, count_per_class, image_shape):
generated_data = []
generated_labels = []
for key, val in indices_dict.items():
if count_per_class:
for i in range(count_per_class):
generated_data.append(np.reshape(data[val[i]], image_shape))
generated_labels.append(key)
else:
for i in val:
generated_data.append(np.reshape(data[i], image_shape))
generated_labels.append(key)
return np.asarray(generated_data), np.reshape(np.asarray(
generated_labels, dtype=np.int32), (-1, 1))
def create_data_loader(data_x, data_y, batch_size, shuffle):
tensor_x = torch.stack([torch.Tensor(i) for i in data_x])
tensor_y = torch.stack([torch.Tensor(i) for i in data_y])
dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=
batch_size, shuffle=shuffle)
return dataloader
def train_model(model, train_data_loader, test_data_loader, num_epochs=5,
learning_rate=0.001, save_epochs=None, model_name='cnn'):
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_data_loader)
train_times = []
train_accuracies = []
train_losses = []
test_accuracies = []
for epoch in range(num_epochs):
start_time = time.time()
for i, (images, labels) in enumerate(train_data_loader):
outputs = model(images)
target = torch.max(labels.long(), 1)[0]
loss = criterion(outputs, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 200 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
epoch + 1, num_epochs, i + 1, total_step, loss.item()))
end_time = time.time()
if save_epochs and epoch + 1 in save_epochs:
torch.save(model, '../data/models/' + model_name + '_' + str(
epoch + 1))
train_times.append(end_time - start_time)
train_losses.append(loss.item())
print('Calculating train accuracy...')
train_accuracies.append(get_accuracies(train_data_loader, model)[0])
print('Calculating test accuracy...')
test_accuracies.append(get_accuracies(test_data_loader, model)[0])
print('Average training time per epoch:', np.mean(train_times))
print('Total training time for all epochs:', np.sum(train_times))
return train_accuracies, test_accuracies, train_losses
def get_accuracies(data_loader, model):
start_time = time.time()
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in data_loader:
labels = torch.max(labels.long(), 1)[0]
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
end_time = time.time()
time_taken = end_time - start_time
print('Accuracy of the model: {} %'.format(accuracy))
return accuracy, time_taken
def get_model_size(model, model_name):
    model = pickle.dumps(model)
byte_size = sys.getsizeof(model)
print('Size of ' + model_name + ' model: ', byte_size / 1000000)
def imshow(img, label_names, file_name='../data/sample_images'):
npimg = img.numpy()
npimg = npimg.astype(np.uint8)
npimg = np.transpose(npimg, (1, 2, 0))
plt.clf()
im = plt.imshow(npimg)
ylim = im.get_extent()[2]
plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)
plt.savefig(file_name)
plt.show()
def show_classwise_images(data, labels, label_names, k):
image_dict = {}
for idx, l in enumerate(labels):
label = l[0]
if label in image_dict.keys() and len(image_dict[label]) < k:
image_dict[label].append(data[idx])
elif label not in image_dict.keys():
image_dict[label] = [data[idx]]
images_to_show = []
labels_to_show = []
for label, image in image_dict.items():
labels_to_show.append(label_names[label])
for i in image:
images_to_show.append(i)
images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])
imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)
def outlier_analysis(model, outliers_tensor, outlier_label_names,
cifar10_label_names):
model.eval()
predicted_labels = []
with torch.no_grad():
start_time = time.time()
outputs = model(outliers_tensor)
end_time = time.time()
print('Time taken for prediction:', str(end_time - start_time))
_, predicted = torch.max(outputs.data, 1)
for idx, label in enumerate(predicted):
print('Original:', outlier_label_names[idx], 'Predicted:',
cifar10_label_names[label])
predicted_labels.append(cifar10_label_names[label])
imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),
predicted_labels)
def plot_values(x, y, xlabel, ylabel, title, legend, fig_name):
plt.clf()
for y_i in y:
plt.plot(x, y_i)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.legend(legend)
plt.savefig('../data/plots/' + fig_name)
plt.show()
<|reserved_special_token_1|>
import pickle
import sys
import numpy as np
import torch
import time
import torchvision
import matplotlib
import matplotlib.pyplot as plt
def load_cifar_data(data_files):
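    # stack every pickled CIFAR batch file into one data array and one label list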
data = []
labels = []
for file in data_files:
with open(file, 'rb') as fo:
data_dict = pickle.load(fo, encoding='bytes')
if len(data) == 0:
data = data_dict[str.encode('data')]
labels = data_dict[str.encode('labels')]
else:
data = np.vstack((data, data_dict[str.encode('data')]))
labels.extend(data_dict[str.encode('labels')])
return data, labels
def unpickle(file):
with open(file, 'rb') as fo:
res = pickle.load(fo, encoding='bytes')
return res
def get_classwise_indices(labels):
label_indices = {}
for idx, label in enumerate(labels):
if label not in label_indices.keys():
label_indices[label] = [idx]
else:
label_indices[label].append(idx)
return label_indices
def get_data_from_indices(data, indices_dict, count_per_class, image_shape):
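    # pull either the first count_per_class samples of each class, or all of them,
    # reshaping each flat row to image_shape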
generated_data = []
generated_labels = []
for key, val in indices_dict.items():
if count_per_class:
for i in range(count_per_class):
generated_data.append(np.reshape(data[val[i]], image_shape))
generated_labels.append(key)
else:
for i in val:
generated_data.append(np.reshape(data[i], image_shape))
generated_labels.append(key)
return np.asarray(generated_data), np.reshape(np.asarray(
generated_labels, dtype=np.int32), (-1, 1))
def create_data_loader(data_x, data_y, batch_size, shuffle):
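    # wrap arrays as float tensors and expose them through an optionally shuffled DataLoader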
tensor_x = torch.stack([torch.Tensor(i) for i in data_x])
tensor_y = torch.stack([torch.Tensor(i) for i in data_y])
dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=
batch_size, shuffle=shuffle)
return dataloader
def train_model(model, train_data_loader, test_data_loader, num_epochs=5,
learning_rate=0.001, save_epochs=None, model_name='cnn'):
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_data_loader)
train_times = []
train_accuracies = []
train_losses = []
test_accuracies = []
for epoch in range(num_epochs):
start_time = time.time()
for i, (images, labels) in enumerate(train_data_loader):
outputs = model(images)
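            # labels arrive as (N, 1) float tensors; reduce to 1-D integer class indices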
target = torch.max(labels.long(), 1)[0]
loss = criterion(outputs, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 200 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
epoch + 1, num_epochs, i + 1, total_step, loss.item()))
end_time = time.time()
if save_epochs and epoch + 1 in save_epochs:
torch.save(model, '../data/models/' + model_name + '_' + str(
epoch + 1))
train_times.append(end_time - start_time)
train_losses.append(loss.item())
print('Calculating train accuracy...')
train_accuracies.append(get_accuracies(train_data_loader, model)[0])
print('Calculating test accuracy...')
test_accuracies.append(get_accuracies(test_data_loader, model)[0])
print('Average training time per epoch:', np.mean(train_times))
print('Total training time for all epochs:', np.sum(train_times))
return train_accuracies, test_accuracies, train_losses
def get_accuracies(data_loader, model):
start_time = time.time()
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in data_loader:
labels = torch.max(labels.long(), 1)[0]
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
end_time = time.time()
time_taken = end_time - start_time
print('Accuracy of the model: {} %'.format(accuracy))
return accuracy, time_taken
def get_model_size(model, model_name):
    model = pickle.dumps(model)
byte_size = sys.getsizeof(model)
print('Size of ' + model_name + ' model: ', byte_size / 1000000)
def imshow(img, label_names, file_name='../data/sample_images'):
npimg = img.numpy()
npimg = npimg.astype(np.uint8)
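    # CHW -> HWC so matplotlib can render the grid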
npimg = np.transpose(npimg, (1, 2, 0))
plt.clf()
im = plt.imshow(npimg)
ylim = im.get_extent()[2]
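    # one class-name tick per row of the image grid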
plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)
plt.savefig(file_name)
plt.show()
def show_classwise_images(data, labels, label_names, k):
image_dict = {}
for idx, l in enumerate(labels):
label = l[0]
if label in image_dict.keys() and len(image_dict[label]) < k:
image_dict[label].append(data[idx])
elif label not in image_dict.keys():
image_dict[label] = [data[idx]]
images_to_show = []
labels_to_show = []
for label, image in image_dict.items():
labels_to_show.append(label_names[label])
for i in image:
images_to_show.append(i)
images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])
imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)
def outlier_analysis(model, outliers_tensor, outlier_label_names,
cifar10_label_names):
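    # classify out-of-distribution images and report which CIFAR-10 class the model picks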
model.eval()
predicted_labels = []
with torch.no_grad():
start_time = time.time()
outputs = model(outliers_tensor)
end_time = time.time()
print('Time taken for prediction:', str(end_time - start_time))
_, predicted = torch.max(outputs.data, 1)
for idx, label in enumerate(predicted):
print('Original:', outlier_label_names[idx], 'Predicted:',
cifar10_label_names[label])
predicted_labels.append(cifar10_label_names[label])
imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),
predicted_labels)
def plot_values(x, y, xlabel, ylabel, title, legend, fig_name):
plt.clf()
for y_i in y:
plt.plot(x, y_i)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.legend(legend)
plt.savefig('../data/plots/' + fig_name)
plt.show()
<|reserved_special_token_1|>
import pickle
import sys
import numpy as np
import torch
import time
import torchvision
import matplotlib
import matplotlib.pyplot as plt
def load_cifar_data(data_files):
data = []
labels = []
for file in data_files:
with open(file, 'rb') as fo:
data_dict = pickle.load(fo, encoding='bytes')
if len(data) == 0:
data = data_dict[str.encode('data')]
labels = data_dict[str.encode('labels')]
else:
data = np.vstack((data, data_dict[str.encode('data')]))
labels.extend(data_dict[str.encode('labels')])
return data, labels
def unpickle(file):
with open(file, 'rb') as fo:
res = pickle.load(fo, encoding='bytes')
return res
def get_classwise_indices(labels):
label_indices = {}
for idx, label in enumerate(labels):
if label not in label_indices.keys():
label_indices[label] = [idx]
else:
label_indices[label].append(idx)
return label_indices
def get_data_from_indices(data, indices_dict, count_per_class, image_shape):
generated_data = []
generated_labels = []
for key, val in indices_dict.items():
if count_per_class:
for i in range(count_per_class):
generated_data.append(np.reshape(data[val[i]], image_shape))
generated_labels.append(key)
else:
for i in val:
generated_data.append(np.reshape(data[i], image_shape))
generated_labels.append(key)
return np.asarray(generated_data), np.reshape(np.asarray(generated_labels, dtype=np.int32), (-1,1))
def create_data_loader(data_x, data_y, batch_size, shuffle):
tensor_x = torch.stack([torch.Tensor(i) for i in data_x]) # transform to torch tensors
tensor_y = torch.stack([torch.Tensor(i) for i in data_y])
    dataset = torch.utils.data.TensorDataset(tensor_x,tensor_y) # create dataset
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle) # create dataloader
return dataloader
def train_model(model, train_data_loader, test_data_loader, num_epochs=5, learning_rate=0.001, save_epochs=None, model_name="cnn"):
# Loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_step = len(train_data_loader)
train_times = []
train_accuracies = []
train_losses = []
test_accuracies = []
for epoch in range(num_epochs):
start_time = time.time()
for i, (images, labels) in enumerate(train_data_loader):
# Forward pass
outputs = model(images)
target = torch.max(labels.long(), 1)[0]
loss = criterion(outputs, target)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 200 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
end_time = time.time()
if save_epochs and epoch + 1 in save_epochs:
torch.save(model, "../data/models/" + model_name + "_" + str(epoch+1))
train_times.append(end_time - start_time)
train_losses.append(loss.item())
print("Calculating train accuracy...")
train_accuracies.append(get_accuracies(train_data_loader, model)[0])
print("Calculating test accuracy...")
test_accuracies.append(get_accuracies(test_data_loader, model)[0])
print("Average training time per epoch:", np.mean(train_times))
print("Total training time for all epochs:", np.sum(train_times))
return train_accuracies, test_accuracies, train_losses
def get_accuracies(data_loader, model):
start_time = time.time()
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in data_loader:
labels = torch.max(labels.long(), 1)[0]
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
end_time = time.time()
time_taken = end_time - start_time
print('Accuracy of the model: {} %'.format(accuracy))
return accuracy, time_taken
def get_model_size(model, model_name):
    model = pickle.dumps(model)
byte_size = sys.getsizeof(model)
print('Size of ' + model_name + ' model: ', byte_size/1000000)
def imshow(img, label_names, file_name="../data/sample_images"):
npimg = img.numpy()
npimg = npimg.astype(np.uint8)
npimg = np.transpose(npimg, (1, 2, 0))
plt.clf()
im = plt.imshow(npimg)
ylim = im.get_extent()[2]
plt.yticks(np.arange(0, ylim + 1, ylim/len(label_names)), label_names)
plt.savefig(file_name)
plt.show()
def show_classwise_images(data, labels, label_names, k):
image_dict = {}
for idx, l in enumerate(labels):
label = l[0]
if label in image_dict.keys() and len(image_dict[label]) < k:
image_dict[label].append(data[idx])
elif label not in image_dict.keys():
image_dict[label] = [data[idx]]
images_to_show = []
labels_to_show = []
for label, image in image_dict.items():
labels_to_show.append(label_names[label])
for i in image:
images_to_show.append(i)
images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])
imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)
def outlier_analysis(model, outliers_tensor, outlier_label_names, cifar10_label_names):
model.eval()
predicted_labels = []
with torch.no_grad():
start_time = time.time()
outputs = model(outliers_tensor)
end_time = time.time()
print("Time taken for prediction:", str(end_time - start_time))
_, predicted = torch.max(outputs.data, 1)
for idx, label in enumerate(predicted):
print("Original:", outlier_label_names[idx], "Predicted:", cifar10_label_names[label])
predicted_labels.append(cifar10_label_names[label])
imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1), predicted_labels)
def plot_values(x, y, xlabel, ylabel, title, legend, fig_name):
plt.clf()
for y_i in y:
plt.plot(x, y_i)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.legend(legend)
plt.savefig("../data/plots/" + fig_name)
plt.show()
|
flexible
|
{
"blob_id": "66fe0a3b84773ee1d4f91d8fde60f1fc5b3d7e4c",
"index": 6454,
"step-1": "<mask token>\n\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\n\n<mask token>\n\n\ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(\n generated_labels, dtype=np.int32), (-1, 1))\n\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x])\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=shuffle)\n return dataloader\n\n\ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5,\n learning_rate=0.001, save_epochs=None, model_name='cnn'):\n num_epochs = num_epochs\n learning_rate = learning_rate\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, '../data/models/' + model_name + '_' + str(\n epoch + 1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item())\n print('Calculating train accuracy...')\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print('Calculating test accuracy...')\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print('Average training time per epoch:', np.mean(train_times))\n print('Total training time for all epochs:', np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n\n\ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: 
', byte_size / 1000000)\n\n\ndef imshow(img, label_names, file_name='../data/sample_images'):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n\n\ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n res = pickle.load(fo, encoding='bytes')\n return res\n\n\n<mask token>\n\n\ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(\n generated_labels, dtype=np.int32), (-1, 1))\n\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x])\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=shuffle)\n return dataloader\n\n\ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5,\n learning_rate=0.001, save_epochs=None, model_name='cnn'):\n num_epochs = num_epochs\n learning_rate = learning_rate\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, '../data/models/' + model_name + '_' + str(\n epoch + 1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item())\n print('Calculating train accuracy...')\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print('Calculating test accuracy...')\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print('Average training time per epoch:', np.mean(train_times))\n print('Total training time for all epochs:', np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n\n\ndef get_model_size(model, 
model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: ', byte_size / 1000000)\n\n\ndef imshow(img, label_names, file_name='../data/sample_images'):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n\n\ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n\n\ndef outlier_analysis(model, outliers_tensor, outlier_label_names,\n cifar10_label_names):\n model.eval()\n predicted_labels = []\n with torch.no_grad():\n start_time = time.time()\n outputs = model(outliers_tensor)\n end_time = time.time()\n print('Time taken for prediction:', str(end_time - start_time))\n _, predicted = torch.max(outputs.data, 1)\n for idx, label in enumerate(predicted):\n print('Original:', outlier_label_names[idx], 'Predicted:',\n cifar10_label_names[label])\n predicted_labels.append(cifar10_label_names[label])\n imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),\n predicted_labels)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n res = pickle.load(fo, encoding='bytes')\n return res\n\n\ndef get_classwise_indices(labels):\n label_indices = {}\n for idx, label in enumerate(labels):\n if label not in label_indices.keys():\n label_indices[label] = [idx]\n else:\n label_indices[label].append(idx)\n return label_indices\n\n\ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(\n generated_labels, dtype=np.int32), (-1, 1))\n\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x])\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=shuffle)\n return dataloader\n\n\ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5,\n learning_rate=0.001, save_epochs=None, model_name='cnn'):\n num_epochs = num_epochs\n learning_rate = learning_rate\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, '../data/models/' + model_name + '_' + str(\n epoch + 1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item())\n print('Calculating train accuracy...')\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print('Calculating test accuracy...')\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print('Average training time per epoch:', np.mean(train_times))\n print('Total training time for all epochs:', np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n 
accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n\n\ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: ', byte_size / 1000000)\n\n\ndef imshow(img, label_names, file_name='../data/sample_images'):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n\n\ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n\n\ndef outlier_analysis(model, outliers_tensor, outlier_label_names,\n cifar10_label_names):\n model.eval()\n predicted_labels = []\n with torch.no_grad():\n start_time = time.time()\n outputs = model(outliers_tensor)\n end_time = time.time()\n print('Time taken for prediction:', str(end_time - start_time))\n _, predicted = torch.max(outputs.data, 1)\n for idx, label in enumerate(predicted):\n print('Original:', outlier_label_names[idx], 'Predicted:',\n cifar10_label_names[label])\n predicted_labels.append(cifar10_label_names[label])\n imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),\n predicted_labels)\n\n\ndef plot_values(x, y, xlabel, ylabel, title, legend, fig_name):\n plt.clf()\n for y_i in y:\n plt.plot(x, y_i)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend(legend)\n plt.savefig('../data/plots/' + fig_name)\n plt.show()\n",
"step-4": "import pickle\nimport numpy as np\nimport torch\nimport time\nimport torchvision\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n res = pickle.load(fo, encoding='bytes')\n return res\n\n\ndef get_classwise_indices(labels):\n label_indices = {}\n for idx, label in enumerate(labels):\n if label not in label_indices.keys():\n label_indices[label] = [idx]\n else:\n label_indices[label].append(idx)\n return label_indices\n\n\ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(\n generated_labels, dtype=np.int32), (-1, 1))\n\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x])\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=shuffle)\n return dataloader\n\n\ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5,\n learning_rate=0.001, save_epochs=None, model_name='cnn'):\n num_epochs = num_epochs\n learning_rate = learning_rate\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, '../data/models/' + model_name + '_' + str(\n epoch + 1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item())\n print('Calculating train accuracy...')\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print('Calculating test accuracy...')\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print('Average training time per epoch:', np.mean(train_times))\n print('Total training time for all epochs:', np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = 
model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n\n\ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: ', byte_size / 1000000)\n\n\ndef imshow(img, label_names, file_name='../data/sample_images'):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n\n\ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n\n\ndef outlier_analysis(model, outliers_tensor, outlier_label_names,\n cifar10_label_names):\n model.eval()\n predicted_labels = []\n with torch.no_grad():\n start_time = time.time()\n outputs = model(outliers_tensor)\n end_time = time.time()\n print('Time taken for prediction:', str(end_time - start_time))\n _, predicted = torch.max(outputs.data, 1)\n for idx, label in enumerate(predicted):\n print('Original:', outlier_label_names[idx], 'Predicted:',\n cifar10_label_names[label])\n predicted_labels.append(cifar10_label_names[label])\n imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),\n predicted_labels)\n\n\ndef plot_values(x, y, xlabel, ylabel, title, legend, fig_name):\n plt.clf()\n for y_i in y:\n plt.plot(x, y_i)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend(legend)\n plt.savefig('../data/plots/' + fig_name)\n plt.show()\n",
"step-5": "import pickle\nimport numpy as np\nimport torch\nimport time\nimport torchvision\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n res = pickle.load(fo, encoding='bytes')\n return res\n \ndef get_classwise_indices(labels):\n label_indices = {}\n for idx, label in enumerate(labels):\n if label not in label_indices.keys():\n label_indices[label] = [idx]\n else:\n label_indices[label].append(idx)\n return label_indices\n \ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(generated_labels, dtype=np.int32), (-1,1))\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x]) # transform to torch tensors\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n\n dataset = torch.utils.data.TensorDataset(tensor_x,tensor_y) # create datset\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle) # create dataloader\n return dataloader\n \ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5, learning_rate=0.001, save_epochs=None, model_name=\"cnn\"):\n num_epochs = num_epochs\n learning_rate = learning_rate\n\n # Loss and optimizer\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n \n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n \n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n # Forward pass\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if (i+1) % 200 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, \"../data/models/\" + model_name + \"_\" + str(epoch+1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item()) \n print(\"Calculating train accuracy...\")\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print(\"Calculating test accuracy...\")\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print(\"Average training time per epoch:\", np.mean(train_times))\n print(\"Total training time for all epochs:\", np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n 
correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n \ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: ', byte_size/1000000)\n \ndef imshow(img, label_names, file_name=\"../data/sample_images\"):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim/len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n \ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n \n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n \n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n \n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n \ndef outlier_analysis(model, outliers_tensor, outlier_label_names, cifar10_label_names):\n model.eval()\n predicted_labels = []\n with torch.no_grad():\n start_time = time.time()\n outputs = model(outliers_tensor)\n end_time = time.time()\n print(\"Time taken for prediction:\", str(end_time - start_time))\n _, predicted = torch.max(outputs.data, 1)\n for idx, label in enumerate(predicted):\n print(\"Original:\", outlier_label_names[idx], \"Predicted:\", cifar10_label_names[label])\n predicted_labels.append(cifar10_label_names[label])\n imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1), predicted_labels)\n \ndef plot_values(x, y, xlabel, ylabel, title, legend, fig_name):\n plt.clf()\n for y_i in y:\n plt.plot(x, y_i)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend(legend)\n plt.savefig(\"../data/plots/\" + fig_name)\n plt.show()",
"step-ids": [
8,
10,
12,
13,
14
]
}
|
[
8,
10,
12,
13,
14
] |
<|reserved_special_token_0|>
def k_NN(data, predict, k=3):
if len(data) >= k:
warnings.warn('K is set to a value less than total voting groups !')
distances = []
[[distances.append([np.linalg.norm(np.array(features) - np.array(
predict)), group]) for features in data[group]] for group in data]
votes = [i[1] for i in sorted(distances)[:k]]
vote_result = Counter(votes).most_common(1)[0][0]
confidence = Counter(votes).most_common(1)[0][1] / k
return vote_result, confidence
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def k_NN(data, predict, k=3):
if len(data) >= k:
warnings.warn('K is set to a value less than total voting groups !')
distances = []
[[distances.append([np.linalg.norm(np.array(features) - np.array(
predict)), group]) for features in data[group]] for group in data]
votes = [i[1] for i in sorted(distances)[:k]]
vote_result = Counter(votes).most_common(1)[0][0]
confidence = Counter(votes).most_common(1)[0][1] / k
return vote_result, confidence
<|reserved_special_token_0|>
df.replace('?', -99999, inplace=True)
df.drop(df.columns[[0]], 1, inplace=True)
<|reserved_special_token_0|>
random.shuffle(full_data)
<|reserved_special_token_0|>
[train_set[i[-1]].append(i[:-1]) for i in train_data]
[test_set[i[-1]].append(i[:-1]) for i in test_data]
<|reserved_special_token_0|>
for group in test_set:
for data in test_set[group]:
vote, confidence = k_NN(train_set, data, k=3)
if group == vote:
correct += 1
total += 1
confidences.append(confidence)
print('Accuracy:', correct / total, 'Average confidence', sum(confidences) /
len(confidences))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def k_NN(data, predict, k=3):
if len(data) >= k:
warnings.warn('K is set to a value less than total voting groups !')
distances = []
[[distances.append([np.linalg.norm(np.array(features) - np.array(
predict)), group]) for features in data[group]] for group in data]
votes = [i[1] for i in sorted(distances)[:k]]
vote_result = Counter(votes).most_common(1)[0][0]
confidence = Counter(votes).most_common(1)[0][1] / k
return vote_result, confidence
df = pd.read_csv('breast-cancer-wisconsin.data.txt')
df.replace('?', -99999, inplace=True)
df.drop(df.columns[[0]], 1, inplace=True)
full_data = df.astype(float).values.tolist()
random.shuffle(full_data)
test_size = 0.2
train_set = {(2): [], (4): []}
test_set = {(2): [], (4): []}
train_data = full_data[:-int(test_size * len(full_data))]
test_data = full_data[-int(test_size * len(full_data)):]
[train_set[i[-1]].append(i[:-1]) for i in train_data]
[test_set[i[-1]].append(i[:-1]) for i in test_data]
correct = 0
total = 0
confidences = []
for group in test_set:
for data in test_set[group]:
vote, confidence = k_NN(train_set, data, k=3)
if group == vote:
correct += 1
total += 1
confidences.append(confidence)
print('Accuracy:', correct / total, 'Average confidence', sum(confidences) /
len(confidences))
<|reserved_special_token_1|>
import numpy as np
from math import sqrt
import warnings
from collections import Counter
import pandas as pd
import random
def k_NN(data, predict, k=3):
if len(data) >= k:
warnings.warn('K is set to a value less than total voting groups !')
distances = []
[[distances.append([np.linalg.norm(np.array(features) - np.array(
predict)), group]) for features in data[group]] for group in data]
votes = [i[1] for i in sorted(distances)[:k]]
vote_result = Counter(votes).most_common(1)[0][0]
confidence = Counter(votes).most_common(1)[0][1] / k
return vote_result, confidence
df = pd.read_csv('breast-cancer-wisconsin.data.txt')
df.replace('?', -99999, inplace=True)
df.drop(df.columns[[0]], 1, inplace=True)
full_data = df.astype(float).values.tolist()
random.shuffle(full_data)
test_size = 0.2
train_set = {(2): [], (4): []}
test_set = {(2): [], (4): []}
train_data = full_data[:-int(test_size * len(full_data))]
test_data = full_data[-int(test_size * len(full_data)):]
[train_set[i[-1]].append(i[:-1]) for i in train_data]
[test_set[i[-1]].append(i[:-1]) for i in test_data]
correct = 0
total = 0
confidences = []
for group in test_set:
for data in test_set[group]:
vote, confidence = k_NN(train_set, data, k=3)
if group == vote:
correct += 1
total += 1
confidences.append(confidence)
print('Accuracy:', correct / total, 'Average confidence', sum(confidences) /
len(confidences))
<|reserved_special_token_1|>
import numpy as np
from math import sqrt
import warnings
from collections import Counter
import pandas as pd
import random
def k_NN(data, predict, k=3):
if len(data) >= k:
warnings.warn("K is set to a value less than total voting groups !")
	distances = []
	for group in data:
		for features in data[group]:
			distances.append([np.linalg.norm(np.array(features) - np.array(predict)), group])
votes = [i[1] for i in sorted(distances)[:k]]
vote_result = Counter(votes).most_common(1)[0][0]
confidence = Counter(votes).most_common(1)[0][1] / k
return vote_result, confidence
df = pd.read_csv("breast-cancer-wisconsin.data.txt")
df.replace('?', -99999, inplace=True)
df.drop(df.columns[[0]], 1, inplace=True)
full_data = df.astype(float).values.tolist()
random.shuffle(full_data)
test_size = 0.2
train_set = {2: [], 4: []}
test_set = {2: [], 4: []}
train_data = full_data[:-int(test_size*len(full_data))]
test_data = full_data[-int(test_size*len(full_data)):]
[ train_set[i[-1]].append(i[:-1]) for i in train_data ]
[ test_set[i[-1]].append(i[:-1]) for i in test_data ]
correct = 0
total = 0
confidences = []
for group in test_set:
for data in test_set[group]:
vote, confidence = k_NN(train_set, data, k=3)
if group == vote:
correct += 1
total += 1
confidences.append(confidence)
print('Accuracy:', correct/total, 'Average confidence', (sum(confidences)/len(confidences)))
|
flexible
|
{
"blob_id": "c6ce6ffe46be993bfe74ccb240e1ebf586c9f556",
"index": 7656,
"step-1": "<mask token>\n\n\ndef k_NN(data, predict, k=3):\n if len(data) >= k:\n warnings.warn('K is set to a value less than total voting groups !')\n distances = []\n [[distances.append([np.linalg.norm(np.array(features) - np.array(\n predict)), group]) for features in data[group]] for group in data]\n votes = [i[1] for i in sorted(distances)[:k]]\n vote_result = Counter(votes).most_common(1)[0][0]\n confidence = Counter(votes).most_common(1)[0][1] / k\n return vote_result, confidence\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef k_NN(data, predict, k=3):\n if len(data) >= k:\n warnings.warn('K is set to a value less than total voting groups !')\n distances = []\n [[distances.append([np.linalg.norm(np.array(features) - np.array(\n predict)), group]) for features in data[group]] for group in data]\n votes = [i[1] for i in sorted(distances)[:k]]\n vote_result = Counter(votes).most_common(1)[0][0]\n confidence = Counter(votes).most_common(1)[0][1] / k\n return vote_result, confidence\n\n\n<mask token>\ndf.replace('?', -99999, inplace=True)\ndf.drop(df.columns[[0]], 1, inplace=True)\n<mask token>\nrandom.shuffle(full_data)\n<mask token>\n[train_set[i[-1]].append(i[:-1]) for i in train_data]\n[test_set[i[-1]].append(i[:-1]) for i in test_data]\n<mask token>\nfor group in test_set:\n for data in test_set[group]:\n vote, confidence = k_NN(train_set, data, k=3)\n if group == vote:\n correct += 1\n total += 1\n confidences.append(confidence)\nprint('Accuracy:', correct / total, 'Average confidence', sum(confidences) /\n len(confidences))\n",
"step-3": "<mask token>\n\n\ndef k_NN(data, predict, k=3):\n if len(data) >= k:\n warnings.warn('K is set to a value less than total voting groups !')\n distances = []\n [[distances.append([np.linalg.norm(np.array(features) - np.array(\n predict)), group]) for features in data[group]] for group in data]\n votes = [i[1] for i in sorted(distances)[:k]]\n vote_result = Counter(votes).most_common(1)[0][0]\n confidence = Counter(votes).most_common(1)[0][1] / k\n return vote_result, confidence\n\n\ndf = pd.read_csv('breast-cancer-wisconsin.data.txt')\ndf.replace('?', -99999, inplace=True)\ndf.drop(df.columns[[0]], 1, inplace=True)\nfull_data = df.astype(float).values.tolist()\nrandom.shuffle(full_data)\ntest_size = 0.2\ntrain_set = {(2): [], (4): []}\ntest_set = {(2): [], (4): []}\ntrain_data = full_data[:-int(test_size * len(full_data))]\ntest_data = full_data[-int(test_size * len(full_data)):]\n[train_set[i[-1]].append(i[:-1]) for i in train_data]\n[test_set[i[-1]].append(i[:-1]) for i in test_data]\ncorrect = 0\ntotal = 0\nconfidences = []\nfor group in test_set:\n for data in test_set[group]:\n vote, confidence = k_NN(train_set, data, k=3)\n if group == vote:\n correct += 1\n total += 1\n confidences.append(confidence)\nprint('Accuracy:', correct / total, 'Average confidence', sum(confidences) /\n len(confidences))\n",
"step-4": "import numpy as np\nfrom math import sqrt\nimport warnings\nfrom collections import Counter\nimport pandas as pd\nimport random\n\n\ndef k_NN(data, predict, k=3):\n if len(data) >= k:\n warnings.warn('K is set to a value less than total voting groups !')\n distances = []\n [[distances.append([np.linalg.norm(np.array(features) - np.array(\n predict)), group]) for features in data[group]] for group in data]\n votes = [i[1] for i in sorted(distances)[:k]]\n vote_result = Counter(votes).most_common(1)[0][0]\n confidence = Counter(votes).most_common(1)[0][1] / k\n return vote_result, confidence\n\n\ndf = pd.read_csv('breast-cancer-wisconsin.data.txt')\ndf.replace('?', -99999, inplace=True)\ndf.drop(df.columns[[0]], 1, inplace=True)\nfull_data = df.astype(float).values.tolist()\nrandom.shuffle(full_data)\ntest_size = 0.2\ntrain_set = {(2): [], (4): []}\ntest_set = {(2): [], (4): []}\ntrain_data = full_data[:-int(test_size * len(full_data))]\ntest_data = full_data[-int(test_size * len(full_data)):]\n[train_set[i[-1]].append(i[:-1]) for i in train_data]\n[test_set[i[-1]].append(i[:-1]) for i in test_data]\ncorrect = 0\ntotal = 0\nconfidences = []\nfor group in test_set:\n for data in test_set[group]:\n vote, confidence = k_NN(train_set, data, k=3)\n if group == vote:\n correct += 1\n total += 1\n confidences.append(confidence)\nprint('Accuracy:', correct / total, 'Average confidence', sum(confidences) /\n len(confidences))\n",
"step-5": "import numpy as np\nfrom math import sqrt\nimport warnings\nfrom collections import Counter\nimport pandas as pd\nimport random\n\ndef k_NN(data, predict, k=3):\n\tif len(data) >= k:\n\t\twarnings.warn(\"K is set to a value less than total voting groups !\")\n\n\tdistances = []\n\t[[ distances.append([np.linalg.norm(np.array(features) - np.array(predict)), group]) for features in data[group]] for group in data]\n\n\tvotes = [i[1] for i in sorted(distances)[:k]]\n\tvote_result = Counter(votes).most_common(1)[0][0]\n\tconfidence = Counter(votes).most_common(1)[0][1] / k\n\n\treturn vote_result, confidence\n\ndf = pd.read_csv(\"breast-cancer-wisconsin.data.txt\")\ndf.replace('?', -99999, inplace=True)\ndf.drop(df.columns[[0]], 1, inplace=True)\nfull_data = df.astype(float).values.tolist()\nrandom.shuffle(full_data)\n\ntest_size = 0.2\ntrain_set = {2: [], 4: []}\ntest_set = {2: [], 4: []}\ntrain_data = full_data[:-int(test_size*len(full_data))]\ntest_data = full_data[-int(test_size*len(full_data)):]\n\n[ train_set[i[-1]].append(i[:-1]) for i in train_data ]\n[ test_set[i[-1]].append(i[:-1]) for i in test_data ]\n\ncorrect = 0\ntotal = 0\nconfidences = []\n\nfor group in test_set:\n\tfor data in test_set[group]:\n\t\tvote, confidence = k_NN(train_set, data, k=3)\n\t\tif group == vote:\n\t\t\tcorrect += 1\n\t\ttotal += 1\n\t\tconfidences.append(confidence)\n\nprint('Accuracy:', correct/total, 'Average confidence', (sum(confidences)/len(confidences)))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
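A minimal usage sketch for the classifier above — the two-class toy dataset and the query point are made-up illustration values, not part of the original record:

# hypothetical toy call: two classes (2 and 4), three neighbours
train = {2: [[1, 2], [2, 3], [3, 1]], 4: [[6, 5], [7, 7], [8, 6]]}
result, conf = k_NN(train, [5, 7], k=3)   # -> class 4 with confidence 1.0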
def primeiras_ocorrencias(s):
    # Map each character of s to the index of its first occurrence.
    dic = {}
    for i, letra in enumerate(s):
        if letra not in dic:
            dic[letra] = i
    return dic
|
normal
|
{
"blob_id": "bb1a6815649eb9e79e2ab1e110ea8acd8adce5aa",
"index": 3379,
"step-1": "<mask token>\n",
"step-2": "def primeiras_ocorrencias(str):\n dic = {}\n for i, letra in enumerate(str):\n if letra not in dic:\n dic[letra] = i\n return dic\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
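A quick worked example for the function above ('banana' is an arbitrary illustration input):

primeiras_ocorrencias('banana')   # -> {'b': 0, 'a': 1, 'n': 2}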
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pickle.dump(emp1, empObj)
pickle.dump(emp2, empObj)
pickle.dump(emp3, empObj)
pickle.dump(emp4, empObj)
print('Successfully written four dictionaries')
empObj.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
emp1 = {'Empno': 1201, 'Name': 'Anushree', 'Age': 25, 'Salary': 47000}
emp2 = {'Empno': 1211, 'Name': 'Zoya', 'Age': 30, 'Salary': 48000}
emp3 = {'Empno': 1251, 'Name': 'Simarjeet', 'Age': 27, 'Salary': 49000}
emp4 = {'Empno': 1266, 'Name': 'Alex', 'Age': 29, 'Salary': 50000}
empObj = open('Emp.dat', 'wb')
pickle.dump(emp1, empObj)
pickle.dump(emp2, empObj)
pickle.dump(emp3, empObj)
pickle.dump(emp4, empObj)
print('Successfully written four dictionaries')
empObj.close()
<|reserved_special_token_1|>
import pickle
emp1 = {'Empno': 1201, 'Name': 'Anushree', 'Age': 25, 'Salary': 47000}
emp2 = {'Empno': 1211, 'Name': 'Zoya', 'Age': 30, 'Salary': 48000}
emp3 = {'Empno': 1251, 'Name': 'Simarjeet', 'Age': 27, 'Salary': 49000}
emp4 = {'Empno': 1266, 'Name': 'Alex', 'Age': 29, 'Salary': 50000}
empObj = open('Emp.dat', 'wb')
pickle.dump(emp1, empObj)
pickle.dump(emp2, empObj)
pickle.dump(emp3, empObj)
pickle.dump(emp4, empObj)
print('Successfully written four dictionaries')
empObj.close()
<|reserved_special_token_1|>
# write dictionary objects to be stored in a binary file
import pickle
# dictionary objects to be stored in a binary file
emp1 = {"Empno" : 1201, "Name" : "Anushree", "Age" : 25, "Salary" : 47000}
emp2 = {"Empno" : 1211, "Name" : "Zoya", "Age" : 30, "Salary" : 48000}
emp3 = {"Empno" : 1251, "Name" : "Simarjeet", "Age" : 27, "Salary" : 49000}
emp4 = {"Empno" : 1266, "Name" : "Alex", "Age" : 29, "Salary" : 50000}
empObj = open('Emp.dat',"wb")
#write onto the file
pickle.dump(emp1,empObj)
pickle.dump(emp2,empObj)
pickle.dump(emp3,empObj)
pickle.dump(emp4,empObj)
print("Successfully written four dictionaries")
empObj.close()
|
flexible
|
{
"blob_id": "23937ae531cc95069a1319f8c77a459ba7645363",
"index": 4331,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-3": "<mask token>\nemp1 = {'Empno': 1201, 'Name': 'Anushree', 'Age': 25, 'Salary': 47000}\nemp2 = {'Empno': 1211, 'Name': 'Zoya', 'Age': 30, 'Salary': 48000}\nemp3 = {'Empno': 1251, 'Name': 'Simarjeet', 'Age': 27, 'Salary': 49000}\nemp4 = {'Empno': 1266, 'Name': 'Alex', 'Age': 29, 'Salary': 50000}\nempObj = open('Emp.dat', 'wb')\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-4": "import pickle\nemp1 = {'Empno': 1201, 'Name': 'Anushree', 'Age': 25, 'Salary': 47000}\nemp2 = {'Empno': 1211, 'Name': 'Zoya', 'Age': 30, 'Salary': 48000}\nemp3 = {'Empno': 1251, 'Name': 'Simarjeet', 'Age': 27, 'Salary': 49000}\nemp4 = {'Empno': 1266, 'Name': 'Alex', 'Age': 29, 'Salary': 50000}\nempObj = open('Emp.dat', 'wb')\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-5": "# write dictionary objects to be stored in a binary file\n\n\nimport pickle\n#dictionary objects to be stored in a binary file\nemp1 = {\"Empno\" : 1201, \"Name\" : \"Anushree\", \"Age\" : 25, \"Salary\" : 47000}\nemp2 = {\"Empno\" : 1211, \"Name\" : \"Zoya\", \"Age\" : 30, \"Salary\" : 48000}\nemp3 = {\"Empno\" : 1251, \"Name\" : \"Simarjeet\", \"Age\" : 27, \"Salary\" : 49000}\nemp4 = {\"Empno\" : 1266, \"Name\" : \"Alex\", \"Age\" : 29, \"Salary\" : 50000}\n\nempObj = open('Emp.dat',\"wb\")\n\n#write onto the file\n\npickle.dump(emp1,empObj)\npickle.dump(emp2,empObj)\npickle.dump(emp3,empObj)\npickle.dump(emp4,empObj)\n\nprint(\"Successfully written four dictionaries\")\nempObj.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
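A companion sketch for reading the records back — it assumes the same Emp.dat layout written above; draining a multi-object pickle file with an EOFError loop is one common pattern:

import pickle
with open('Emp.dat', 'rb') as empObj:
    try:
        while True:
            print(pickle.load(empObj))   # loads one dictionary per call, in write order
    except EOFError:
        pass   # end of file reached after the fourth dictionary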
class CardHolder:
acctlen = 8
retireage = 59.5
def __init__(self, acct, name, age, addr):
self.acct = acct
self.name = name
self.age = age
self.addr = addr
def __getattribute__(self, item): # __getattribute__ intercepts calls for all
# attributes (defined and undefined)
superget = object.__getattribute__ # We have to use __getattribute__ of object
# class (superclass) to prevent looping
if item == 'acct':
return superget(self, 'acct')[:-3] + '***'
elif item == 'remain':
return superget(self, 'retireage') - superget(self, 'age')
else:
return superget(self, item)
def __setattr__(self, key, value):
if key == 'name':
value = value.lower().replace(' ', '_')
elif key == 'age':
if value < 0 or value > 130:
raise ValueError('invalid age')
elif key == 'acct':
value = value.replace('-', '')
if len(value) != self.acctlen:
raise TypeError('invalid acct number')
elif key == 'remain':
raise TypeError('cannot set remain')
self.__dict__[key] = value
if __name__ == '__main__':
bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 main st')
print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')
bob.name = 'Bob Q. Smith'
bob.age = 50
bob.acct = '23-45-67-89'
print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')
sue = CardHolder('5678-12-34', 'Sue Jones', 35, '124 main st')
print(sue.acct, sue.name, sue.remain, sue.addr, sep=' / ')
try:
sue.age = 200
except Exception:
print('Bad age for Sue')
try:
sue.remain = 5
except Exception:
print("Can't set sue.remain")
try:
sue.acct = '1234567'
except Exception:
print('Bad acct for Sue')
|
normal
|
{
"blob_id": "602a7676129721dbfd318407dd972f80d681146c",
"index": 3062,
"step-1": "class CardHolder:\n <mask token>\n <mask token>\n <mask token>\n\n def __getattribute__(self, item):\n superget = object.__getattribute__\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\n<mask token>\n",
"step-2": "class CardHolder:\n <mask token>\n <mask token>\n\n def __init__(self, acct, name, age, addr):\n self.acct = acct\n self.name = name\n self.age = age\n self.addr = addr\n\n def __getattribute__(self, item):\n superget = object.__getattribute__\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\n<mask token>\n",
"step-3": "class CardHolder:\n acctlen = 8\n retireage = 59.5\n\n def __init__(self, acct, name, age, addr):\n self.acct = acct\n self.name = name\n self.age = age\n self.addr = addr\n\n def __getattribute__(self, item):\n superget = object.__getattribute__\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\n<mask token>\n",
"step-4": "class CardHolder:\n acctlen = 8\n retireage = 59.5\n\n def __init__(self, acct, name, age, addr):\n self.acct = acct\n self.name = name\n self.age = age\n self.addr = addr\n\n def __getattribute__(self, item):\n superget = object.__getattribute__\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\nif __name__ == '__main__':\n bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 main st')\n print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')\n bob.name = 'Bob Q. Smith'\n bob.age = 50\n bob.acct = '23-45-67-89'\n print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')\n sue = CardHolder('5678-12-34', 'Sue Jones', 35, '124 main st')\n print(sue.acct, sue.name, sue.remain, sue.addr, sep=' / ')\n try:\n sue.age = 200\n except Exception:\n print('Bad age for Sue')\n try:\n sue.remain = 5\n except Exception:\n print(\"Can't set sue.remain\")\n try:\n sue.acct = '1234567'\n except Exception:\n print('Bad acct for Sue')\n",
"step-5": "class CardHolder:\n acctlen = 8\n retireage = 59.5\n\n def __init__(self, acct, name, age, addr):\n self.acct = acct\n self.name = name\n self.age = age\n self.addr = addr\n\n def __getattribute__(self, item): # __getattribute__ intercepts calls for all\n # attributes (defined and undefined)\n\n superget = object.__getattribute__ # We have to use __getattribute__ of object\n # class (superclass) to prevent looping\n if item == 'acct':\n return superget(self, 'acct')[:-3] + '***'\n elif item == 'remain':\n return superget(self, 'retireage') - superget(self, 'age')\n else:\n return superget(self, item)\n\n def __setattr__(self, key, value):\n if key == 'name':\n value = value.lower().replace(' ', '_')\n elif key == 'age':\n if value < 0 or value > 130:\n raise ValueError('invalid age')\n elif key == 'acct':\n value = value.replace('-', '')\n if len(value) != self.acctlen:\n raise TypeError('invalid acct number')\n elif key == 'remain':\n raise TypeError('cannot set remain')\n self.__dict__[key] = value\n\n\nif __name__ == '__main__':\n bob = CardHolder('1234-5678', 'Bob Smith', 40, '123 main st')\n print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')\n bob.name = 'Bob Q. Smith'\n bob.age = 50\n bob.acct = '23-45-67-89'\n print(bob.acct, bob.name, bob.remain, bob.addr, sep=' / ')\n\n sue = CardHolder('5678-12-34', 'Sue Jones', 35, '124 main st')\n print(sue.acct, sue.name, sue.remain, sue.addr, sep=' / ')\n\n try:\n sue.age = 200\n except Exception:\n print('Bad age for Sue')\n\n try:\n sue.remain = 5\n except Exception:\n print(\"Can't set sue.remain\")\n\n try:\n sue.acct = '1234567'\n except Exception:\n print('Bad acct for Sue')\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
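The `self.__dict__[key] = value` assignment in `__setattr__` above is what avoids re-triggering the hook; a minimal counter-sketch (a hypothetical class, for illustration only) of what the naive assignment would do:

class Loops:
    def __setattr__(self, key, value):
        self.attr = value   # re-enters __setattr__ every time -> RecursionError

# Loops().x = 1   # would raise RecursionError if uncommented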
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setPixel(strip):
for i in range(count):
if i < lightUp:
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
count = int(sys.argv[1])
percent = int(sys.argv[2])
LED_COUNT = count
LED_PIN = 18
LED_FREQ_HZ = 800000
LED_DMA = 5
LED_BRIGHTNESS = 255
LED_INVERT = False
LED_CHANNEL = 0
LED_STRIP = ws.WS2811_STRIP_GRB
lightUp = math.floor(percent / count)
def setPixel(strip):
for i in range(count):
if i < lightUp:
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
<|reserved_special_token_1|>
import sys
import time
import math
from neopixel import *
count = int(sys.argv[1])
percent = int(sys.argv[2])
LED_COUNT = count
LED_PIN = 18
LED_FREQ_HZ = 800000
LED_DMA = 5
LED_BRIGHTNESS = 255
LED_INVERT = False
LED_CHANNEL = 0
LED_STRIP = ws.WS2811_STRIP_GRB
lightUp = math.floor(percent / count)
def setPixel(strip):
for i in range(count):
if i < lightUp:
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
<|reserved_special_token_1|>
import sys
import time
import math
from neopixel import *
count = int(sys.argv[1])
percent = int(sys.argv[2])
# LED strip configuration:
LED_COUNT = count # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0
LED_STRIP = ws.WS2811_STRIP_GRB
#LED_STRIP = ws.SK6812W_STRIP
lightUp = math.floor(percent/count)  # NOTE: equals count*percent/100 only when count == 10
# Initialize the library (must be called once before other functions).
def setPixel(strip):
for i in range(count):
if(i<lightUp):
strip.setPixelColor(i, Color(0, 255, 0))
strip.show()
else:
strip.setPixelColor(i, Color(255, 0, 0))
strip.show()
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)
strip.begin()
setPixel(strip)
|
flexible
|
{
"blob_id": "5ff7a3843314dfd3914c5e96164385d61fbe7fa5",
"index": 684,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef setPixel(strip):\n for i in range(count):\n if i < lightUp:\n strip.setPixelColor(i, Color(0, 255, 0))\n strip.show()\n else:\n strip.setPixelColor(i, Color(255, 0, 0))\n strip.show()\n\n\nif __name__ == '__main__':\n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,\n LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\n strip.begin()\n setPixel(strip)\n",
"step-3": "<mask token>\ncount = int(sys.argv[1])\npercent = int(sys.argv[2])\nLED_COUNT = count\nLED_PIN = 18\nLED_FREQ_HZ = 800000\nLED_DMA = 5\nLED_BRIGHTNESS = 255\nLED_INVERT = False\nLED_CHANNEL = 0\nLED_STRIP = ws.WS2811_STRIP_GRB\nlightUp = math.floor(percent / count)\n\n\ndef setPixel(strip):\n for i in range(count):\n if i < lightUp:\n strip.setPixelColor(i, Color(0, 255, 0))\n strip.show()\n else:\n strip.setPixelColor(i, Color(255, 0, 0))\n strip.show()\n\n\nif __name__ == '__main__':\n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,\n LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\n strip.begin()\n setPixel(strip)\n",
"step-4": "import sys\nimport time\nimport math\nfrom neopixel import *\ncount = int(sys.argv[1])\npercent = int(sys.argv[2])\nLED_COUNT = count\nLED_PIN = 18\nLED_FREQ_HZ = 800000\nLED_DMA = 5\nLED_BRIGHTNESS = 255\nLED_INVERT = False\nLED_CHANNEL = 0\nLED_STRIP = ws.WS2811_STRIP_GRB\nlightUp = math.floor(percent / count)\n\n\ndef setPixel(strip):\n for i in range(count):\n if i < lightUp:\n strip.setPixelColor(i, Color(0, 255, 0))\n strip.show()\n else:\n strip.setPixelColor(i, Color(255, 0, 0))\n strip.show()\n\n\nif __name__ == '__main__':\n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,\n LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\n strip.begin()\n setPixel(strip)\n",
"step-5": "import sys\nimport time\nimport math\nfrom neopixel import *\ncount = int(sys.argv[1])\npercent = int(sys.argv[2])\n# LED strip configuration:\nLED_COUNT = count # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 5 # DMA channel to use for generating signal (try 5)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0\nLED_STRIP = ws.WS2811_STRIP_GRB\t\n#LED_STRIP = ws.SK6812W_STRIP\nlightUp = math.floor(percent/count)\n# Intialize the library (must be called once before other functions).\ndef setPixel(strip):\n\tfor i in range(count):\n\t\tif(i<lightUp):\n\t\t\tstrip.setPixelColor(i, Color(0, 255, 0))\n\t\t\tstrip.show()\n\t\telse:\n\t\t\tstrip.setPixelColor(i, Color(255, 0, 0))\n\t\t\tstrip.show()\nif __name__ == '__main__':\n\tstrip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP)\t\t\n\tstrip.begin()\n\tsetPixel(strip)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
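The `lightUp` formula above matches the intended percentage only for a 10-LED strip; a length-independent variant (an assumption about the author's intent, not part of the original) would be:

# assumption: percent is 0-100 and we want that fraction of `count` LEDs lit
lightUp = math.floor(count * percent / 100)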
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class TimeInterval(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class TimeInterval(object):
def __init__(self, start_time, end_time):
self.start_time = start_time
self.end_time = end_time
|
flexible
|
{
"blob_id": "9d772d5500593583907b65bc2c81490e61375e8b",
"index": 8081,
"step-1": "<mask token>\n",
"step-2": "class TimeInterval(object):\n <mask token>\n",
"step-3": "class TimeInterval(object):\n\n def __init__(self, start_time, end_time):\n self.start_time = start_time\n self.end_time = end_time\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@client.command()
async def inviteInfo(ctx, link):
try:
await delete.byContext(ctx)
except:
pass
linkData = await client.fetch_invite(url=link)
if linkData.inviter:
inviterData = await getUser.byID(linkData.inviter.id)
try:
guildData = await getGuild.byID(linkData.guild.id)
except:
guildData = linkData.guild
embed = discord.Embed(title='Invite information', colour=discord.Color.
purple())
embed.set_thumbnail(url=guildData.icon_url)
fields = [('ID', f'```{guildData.id}```', True), ('Name::',
f'```{guildData.name}```', True), ('Description',
f'```{guildData.description}```', True), ('Created in:',
f"```{guildData.created_at.strftime('%d/%m/%Y')}```", True), (
'Member Count:', f'```{int(linkData.approximate_member_count)}```',
True), ('Link', f'```{linkData.url}```', True), ('\u200b', '\u200b',
True)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
if linkData.inviter:
embed.add_field(name='Inviter ID:', value=f'```{inviterData.id}```',
inline=True)
embed.add_field(name='Inviter:', value=
f"```{inviterData.name + '#' + inviterData.discriminator}```",
inline=True)
embed.set_footer(text='Selfium (◔‿◔)')
await ctx.send(embed=embed)
<|reserved_special_token_1|>
import discord
from app.vars.client import client
from app.helpers import delete, getUser, getGuild
@client.command()
async def inviteInfo(ctx, link):
try:
await delete.byContext(ctx)
except:
pass
linkData = await client.fetch_invite(url=link)
if linkData.inviter:
inviterData = await getUser.byID(linkData.inviter.id)
try:
guildData = await getGuild.byID(linkData.guild.id)
except:
guildData = linkData.guild
embed = discord.Embed(title='Invite information', colour=discord.Color.
purple())
embed.set_thumbnail(url=guildData.icon_url)
fields = [('ID', f'```{guildData.id}```', True), ('Name::',
f'```{guildData.name}```', True), ('Description',
f'```{guildData.description}```', True), ('Created in:',
f"```{guildData.created_at.strftime('%d/%m/%Y')}```", True), (
'Member Count:', f'```{int(linkData.approximate_member_count)}```',
True), ('Link', f'```{linkData.url}```', True), ('\u200b', '\u200b',
True)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
if linkData.inviter:
embed.add_field(name='Inviter ID:', value=f'```{inviterData.id}```',
inline=True)
embed.add_field(name='Inviter:', value=
f"```{inviterData.name + '#' + inviterData.discriminator}```",
inline=True)
embed.set_footer(text='Selfium (◔‿◔)')
await ctx.send(embed=embed)
<|reserved_special_token_1|>
import discord
from app.vars.client import client
from app.helpers import delete, getUser, getGuild
@client.command()
async def inviteInfo(ctx, link):
try:
await delete.byContext(ctx)
except:
pass
linkData = await client.fetch_invite(url=link)
    if linkData.inviter:
inviterData = await getUser.byID(linkData.inviter.id)
try:
guildData = await getGuild.byID(linkData.guild.id)
except:
guildData = linkData.guild
embed = discord.Embed(title="Invite information", colour=discord.Color.purple())
embed.set_thumbnail(url=guildData.icon_url)
fields = [
("ID", f"```{guildData.id}```", True),
("Name::", f"```{guildData.name}```", True),
("Description", f"```{guildData.description}```", True),
("Created in:", f'```{guildData.created_at.strftime("%d/%m/%Y")}```', True),
("Member Count:", f"```{int(linkData.approximate_member_count)}```", True),
("Link", f"```{linkData.url}```", True),
("\u200b", "\u200b", True),
]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
    if linkData.inviter:
embed.add_field(name="Inviter ID:", value=f"```{inviterData.id}```", inline=True)
embed.add_field(name="Inviter:", value=f"```{inviterData.name + '#' + inviterData.discriminator}```", inline=True)
embed.set_footer(text='Selfium (◔‿◔)')
await ctx.send(embed=embed)
|
flexible
|
{
"blob_id": "b8f9633ab3110d00b2f0b82c78ad047fca0d3eee",
"index": 6999,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n linkData = await client.fetch_invite(url=link)\n if linkData.inviter:\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n embed = discord.Embed(title='Invite information', colour=discord.Color.\n purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [('ID', f'```{guildData.id}```', True), ('Name::',\n f'```{guildData.name}```', True), ('Description',\n f'```{guildData.description}```', True), ('Created in:',\n f\"```{guildData.created_at.strftime('%d/%m/%Y')}```\", True), (\n 'Member Count:', f'```{int(linkData.approximate_member_count)}```',\n True), ('Link', f'```{linkData.url}```', True), ('\\u200b', '\\u200b',\n True)]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n if linkData.inviter:\n embed.add_field(name='Inviter ID:', value=f'```{inviterData.id}```',\n inline=True)\n embed.add_field(name='Inviter:', value=\n f\"```{inviterData.name + '#' + inviterData.discriminator}```\",\n inline=True)\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)\n",
"step-3": "import discord\nfrom app.vars.client import client\nfrom app.helpers import delete, getUser, getGuild\n\n\[email protected]()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n linkData = await client.fetch_invite(url=link)\n if linkData.inviter:\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n embed = discord.Embed(title='Invite information', colour=discord.Color.\n purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [('ID', f'```{guildData.id}```', True), ('Name::',\n f'```{guildData.name}```', True), ('Description',\n f'```{guildData.description}```', True), ('Created in:',\n f\"```{guildData.created_at.strftime('%d/%m/%Y')}```\", True), (\n 'Member Count:', f'```{int(linkData.approximate_member_count)}```',\n True), ('Link', f'```{linkData.url}```', True), ('\\u200b', '\\u200b',\n True)]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n if linkData.inviter:\n embed.add_field(name='Inviter ID:', value=f'```{inviterData.id}```',\n inline=True)\n embed.add_field(name='Inviter:', value=\n f\"```{inviterData.name + '#' + inviterData.discriminator}```\",\n inline=True)\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)\n",
"step-4": "import discord\nfrom app.vars.client import client\nfrom app.helpers import delete, getUser, getGuild\n\[email protected]()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n\n linkData = await client.fetch_invite(url=link)\n if (linkData.inviter):\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n\n embed = discord.Embed(title=\"Invite information\", colour=discord.Color.purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [\n (\"ID\", f\"```{guildData.id}```\", True),\n (\"Name::\", f\"```{guildData.name}```\", True),\n (\"Description\", f\"```{guildData.description}```\", True),\n (\"Created in:\", f'```{guildData.created_at.strftime(\"%d/%m/%Y\")}```', True),\n (\"Member Count:\", f\"```{int(linkData.approximate_member_count)}```\", True), \n (\"Link\", f\"```{linkData.url}```\", True),\n (\"\\u200b\", \"\\u200b\", True),\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n \n if (linkData.inviter):\n embed.add_field(name=\"Inviter ID:\", value=f\"```{inviterData.id}```\", inline=True)\n embed.add_field(name=\"Inviter:\", value=f\"```{inviterData.name + '#' + inviterData.discriminator}```\", inline=True)\n\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
var blackList = []string{
// global
"document", "window", "top", "parent", "global", "this",
//func
"console", "alert", "log", "promise", "fetch", "eval", "import",
//char
"<", ">", "`", "\\*", "&", "#", "%", "\\\\",
//key
"if", "set", "get", "with", "yield", "async", "wait", "func", "for", "error", "string",
//string
"href", "location", "url", "cookie", "src",
}
|
normal
|
{
"blob_id": "f502290cc8ffa9571454a214497aff1d1c5e1c9f",
"index": 8285,
"step-1": "var blackList = []string{\n\t// global\n\t\"document\", \"window\", \"top\", \"parent\", \"global\", \"this\",\n\t//func\n\t\"console\", \"alert\", \"log\", \"promise\", \"fetch\", \"eval\", \"import\",\n\t//char\n\t\"<\", \">\", \"`\", \"\\\\*\", \"&\", \"#\", \"%\", \"\\\\\\\\\",\n\t//key\n\t\"if\", \"set\", \"get\", \"with\", \"yield\", \"async\", \"wait\", \"func\", \"for\", \"error\", \"string\",\n\t//string\n\t\"href\", \"location\", \"url\", \"cookie\", \"src\",\n}",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from gatheros_event.views.mixins import AccountMixin
from gatheros_subscription.helpers.extract import (
create_extract,
get_extract_file_name,
)
from gatheros_subscription.models import Subscription
class ExtractSubscriptionPDFView(AccountMixin):
subscription = None
def pre_dispatch(self, request):
uuid = self.kwargs.get('pk')
self.subscription = get_object_or_404(Subscription,
uuid=uuid)
return super().pre_dispatch(request)
def get_permission_denied_url(self):
""" Resgata url quando permissão negada. """
return reverse('subscription:subscription-view', kwargs={
'event_pk': self.kwargs.get('event_pk'),
'pk': self.kwargs.get('pk'),
})
def get(self, request, *args, **kwargs):
pdf = create_extract(subscription=self.subscription,
user=self.request.user)
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'inline; filename="{}"'.format(
get_extract_file_name(subscription=self.subscription)
)
return response
def can_access(self):
return self.subscription.lot.price > 0
|
normal
|
{
"blob_id": "431f109903e014a29aed7f125d47f327e17b9f65",
"index": 4366,
"step-1": "<mask token>\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n <mask token>\n\n def pre_dispatch(self, request):\n uuid = self.kwargs.get('pk')\n self.subscription = get_object_or_404(Subscription, uuid=uuid)\n return super().pre_dispatch(request)\n\n def get_permission_denied_url(self):\n \"\"\" Resgata url quando permissão negada. \"\"\"\n return reverse('subscription:subscription-view', kwargs={'event_pk':\n self.kwargs.get('event_pk'), 'pk': self.kwargs.get('pk')})\n <mask token>\n\n def can_access(self):\n return self.subscription.lot.price > 0\n",
"step-3": "<mask token>\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n subscription = None\n\n def pre_dispatch(self, request):\n uuid = self.kwargs.get('pk')\n self.subscription = get_object_or_404(Subscription, uuid=uuid)\n return super().pre_dispatch(request)\n\n def get_permission_denied_url(self):\n \"\"\" Resgata url quando permissão negada. \"\"\"\n return reverse('subscription:subscription-view', kwargs={'event_pk':\n self.kwargs.get('event_pk'), 'pk': self.kwargs.get('pk')})\n\n def get(self, request, *args, **kwargs):\n pdf = create_extract(subscription=self.subscription, user=self.\n request.user)\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"{}\"'.format(\n get_extract_file_name(subscription=self.subscription))\n return response\n\n def can_access(self):\n return self.subscription.lot.price > 0\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom gatheros_event.views.mixins import AccountMixin\nfrom gatheros_subscription.helpers.extract import create_extract, get_extract_file_name\nfrom gatheros_subscription.models import Subscription\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n subscription = None\n\n def pre_dispatch(self, request):\n uuid = self.kwargs.get('pk')\n self.subscription = get_object_or_404(Subscription, uuid=uuid)\n return super().pre_dispatch(request)\n\n def get_permission_denied_url(self):\n \"\"\" Resgata url quando permissão negada. \"\"\"\n return reverse('subscription:subscription-view', kwargs={'event_pk':\n self.kwargs.get('event_pk'), 'pk': self.kwargs.get('pk')})\n\n def get(self, request, *args, **kwargs):\n pdf = create_extract(subscription=self.subscription, user=self.\n request.user)\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"{}\"'.format(\n get_extract_file_name(subscription=self.subscription))\n return response\n\n def can_access(self):\n return self.subscription.lot.price > 0\n",
"step-5": "from django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\n\nfrom gatheros_event.views.mixins import AccountMixin\nfrom gatheros_subscription.helpers.extract import (\n create_extract,\n get_extract_file_name,\n)\nfrom gatheros_subscription.models import Subscription\n\n\nclass ExtractSubscriptionPDFView(AccountMixin):\n subscription = None\n\n def pre_dispatch(self, request):\n uuid = self.kwargs.get('pk')\n self.subscription = get_object_or_404(Subscription,\n uuid=uuid)\n\n return super().pre_dispatch(request)\n\n def get_permission_denied_url(self):\n \"\"\" Resgata url quando permissão negada. \"\"\"\n return reverse('subscription:subscription-view', kwargs={\n 'event_pk': self.kwargs.get('event_pk'),\n 'pk': self.kwargs.get('pk'),\n })\n\n def get(self, request, *args, **kwargs):\n pdf = create_extract(subscription=self.subscription,\n user=self.request.user)\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=\"{}\"'.format(\n get_extract_file_name(subscription=self.subscription)\n )\n\n return response\n\n def can_access(self):\n return self.subscription.lot.price > 0\n",
"step-ids": [
1,
4,
6,
7,
8
]
}
|
[
1,
4,
6,
7,
8
] |
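The view above serves the PDF inline; swapping the disposition (standard HTTP header semantics, not part of the original code) would force a download instead:

response['Content-Disposition'] = 'attachment; filename="{}"'.format(
    get_extract_file_name(subscription=self.subscription))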
<|reserved_special_token_0|>
class TestCases(unittest.TestCase):
def test_chart_cell(self):
t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',
'quantity': 50}]
IPython.get_ipython().user_ns = {}
chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',
'data': t, 'fields': None}, '')
self.assertTrue(chart.find('charts.render(') > 0)
self.assertTrue(chart.find("'geo'") > 0)
self.assertTrue(chart.find('"fields": "*"') > 0)
self.assertTrue(chart.find('{"c": [{"v": "US"}, {"v": 100}]}') > 0 or
chart.find('{"c": [{"v": 100}, {"v": "US"}]}') > 0)
self.assertTrue(chart.find('{"c": [{"v": "ZA"}, {"v": 50}]}') > 0 or
chart.find('{"c": [{"v": 50}, {"v": "ZA"}]}') > 0)
def test_chart_magic(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def noop_decorator(func):
return func
<|reserved_special_token_0|>
class TestCases(unittest.TestCase):
def test_chart_cell(self):
t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',
'quantity': 50}]
IPython.get_ipython().user_ns = {}
chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',
'data': t, 'fields': None}, '')
self.assertTrue(chart.find('charts.render(') > 0)
self.assertTrue(chart.find("'geo'") > 0)
self.assertTrue(chart.find('"fields": "*"') > 0)
self.assertTrue(chart.find('{"c": [{"v": "US"}, {"v": 100}]}') > 0 or
chart.find('{"c": [{"v": 100}, {"v": "US"}]}') > 0)
self.assertTrue(chart.find('{"c": [{"v": "ZA"}, {"v": 50}]}') > 0 or
chart.find('{"c": [{"v": 50}, {"v": "ZA"}]}') > 0)
def test_chart_magic(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def noop_decorator(func):
return func
IPython.core.magic.register_line_cell_magic = noop_decorator
IPython.core.magic.register_line_magic = noop_decorator
IPython.core.magic.register_cell_magic = noop_decorator
IPython.core.display.HTML = lambda x: x
IPython.core.display.JSON = lambda x: x
class TestCases(unittest.TestCase):
def test_chart_cell(self):
t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',
'quantity': 50}]
IPython.get_ipython().user_ns = {}
chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',
'data': t, 'fields': None}, '')
self.assertTrue(chart.find('charts.render(') > 0)
self.assertTrue(chart.find("'geo'") > 0)
self.assertTrue(chart.find('"fields": "*"') > 0)
self.assertTrue(chart.find('{"c": [{"v": "US"}, {"v": 100}]}') > 0 or
chart.find('{"c": [{"v": 100}, {"v": "US"}]}') > 0)
self.assertTrue(chart.find('{"c": [{"v": "ZA"}, {"v": 50}]}') > 0 or
chart.find('{"c": [{"v": 50}, {"v": "ZA"}]}') > 0)
def test_chart_magic(self):
pass
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import IPython.core.display
import IPython.core.magic
import datalab.utils.commands
def noop_decorator(func):
return func
IPython.core.magic.register_line_cell_magic = noop_decorator
IPython.core.magic.register_line_magic = noop_decorator
IPython.core.magic.register_cell_magic = noop_decorator
IPython.core.display.HTML = lambda x: x
IPython.core.display.JSON = lambda x: x
class TestCases(unittest.TestCase):
def test_chart_cell(self):
t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',
'quantity': 50}]
IPython.get_ipython().user_ns = {}
chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',
'data': t, 'fields': None}, '')
self.assertTrue(chart.find('charts.render(') > 0)
self.assertTrue(chart.find("'geo'") > 0)
self.assertTrue(chart.find('"fields": "*"') > 0)
self.assertTrue(chart.find('{"c": [{"v": "US"}, {"v": 100}]}') > 0 or
chart.find('{"c": [{"v": 100}, {"v": "US"}]}') > 0)
self.assertTrue(chart.find('{"c": [{"v": "ZA"}, {"v": 50}]}') > 0 or
chart.find('{"c": [{"v": 50}, {"v": "ZA"}]}') > 0)
def test_chart_magic(self):
pass
<|reserved_special_token_1|>
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
# import Python so we can mock the parts we need to here.
import IPython.core.display
import IPython.core.magic
import datalab.utils.commands
def noop_decorator(func):
return func
IPython.core.magic.register_line_cell_magic = noop_decorator
IPython.core.magic.register_line_magic = noop_decorator
IPython.core.magic.register_cell_magic = noop_decorator
IPython.core.display.HTML = lambda x: x
IPython.core.display.JSON = lambda x: x
class TestCases(unittest.TestCase):
def test_chart_cell(self):
t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA', 'quantity': 50}]
IPython.get_ipython().user_ns = {}
chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo', 'data': t, 'fields': None},
'')
self.assertTrue(chart.find('charts.render(') > 0)
self.assertTrue(chart.find('\'geo\'') > 0)
self.assertTrue(chart.find('"fields": "*"') > 0)
self.assertTrue(chart.find('{"c": [{"v": "US"}, {"v": 100}]}') > 0 or
chart.find('{"c": [{"v": 100}, {"v": "US"}]}') > 0)
self.assertTrue(chart.find('{"c": [{"v": "ZA"}, {"v": 50}]}') > 0 or
chart.find('{"c": [{"v": 50}, {"v": "ZA"}]}') > 0)
def test_chart_magic(self):
# TODO(gram): complete this test
pass
|
flexible
|
{
"blob_id": "445e91edbeb88a3e300761342b28369fd9833fbb",
"index": 5727,
"step-1": "<mask token>\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-2": "<mask token>\n\n\ndef noop_decorator(func):\n return func\n\n\n<mask token>\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-3": "<mask token>\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport unittest\nimport IPython.core.display\nimport IPython.core.magic\nimport datalab.utils.commands\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA',\n 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo',\n 'data': t, 'fields': None}, '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find(\"'geo'\") > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n pass\n",
"step-5": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing permissions and limitations under\n# the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport unittest\n\n# import Python so we can mock the parts we need to here.\nimport IPython.core.display\nimport IPython.core.magic\n\nimport datalab.utils.commands\n\n\ndef noop_decorator(func):\n return func\n\n\nIPython.core.magic.register_line_cell_magic = noop_decorator\nIPython.core.magic.register_line_magic = noop_decorator\nIPython.core.magic.register_cell_magic = noop_decorator\nIPython.core.display.HTML = lambda x: x\nIPython.core.display.JSON = lambda x: x\n\n\nclass TestCases(unittest.TestCase):\n\n def test_chart_cell(self):\n t = [{'country': 'US', 'quantity': 100}, {'country': 'ZA', 'quantity': 50}]\n IPython.get_ipython().user_ns = {}\n chart = datalab.utils.commands._chart._chart_cell({'chart': 'geo', 'data': t, 'fields': None},\n '')\n self.assertTrue(chart.find('charts.render(') > 0)\n self.assertTrue(chart.find('\\'geo\\'') > 0)\n self.assertTrue(chart.find('\"fields\": \"*\"') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"US\"}, {\"v\": 100}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 100}, {\"v\": \"US\"}]}') > 0)\n self.assertTrue(chart.find('{\"c\": [{\"v\": \"ZA\"}, {\"v\": 50}]}') > 0 or\n chart.find('{\"c\": [{\"v\": 50}, {\"v\": \"ZA\"}]}') > 0)\n\n def test_chart_magic(self):\n # TODO(gram): complete this test\n pass\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
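
The record above works by reassigning IPython's decorator registries to a no-op before import, so a notebook-only module can be imported under plain unittest. A sketch of a scoped variant using unittest.mock — the patched attribute and the imported module name are taken from the record, and the pattern generalizes to any module that registers magics at import time:

from unittest import mock
import IPython.core.magic

def noop_decorator(func):
    # Stand-in for the real magic-registration decorator during tests.
    return func

# Scoped alternative to module-level monkey patching: the original
# attribute is restored automatically when the context exits.
with mock.patch.object(IPython.core.magic, 'register_cell_magic', noop_decorator):
    import datalab.utils.commands  # registers magics at import time
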
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Port(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Port(object):
def __init__(self, mac):
self.mac = mac
|
flexible
|
{
"blob_id": "cd89c9eaea9d331288fd07f1968ef9dce89b4a4b",
"index": 7228,
"step-1": "<mask token>\n",
"step-2": "class Port(object):\n <mask token>\n",
"step-3": "class Port(object):\n\n def __init__(self, mac):\n self.mac = mac\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
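
The Port record is minimal; for completeness, a short usage sketch (the MAC string is illustrative):

class Port(object):
    def __init__(self, mac):
        self.mac = mac

port = Port('aa:bb:cc:dd:ee:ff')  # illustrative MAC address
assert port.mac == 'aa:bb:cc:dd:ee:ff'
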
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for n in my_list:
if new_num <= n:
i += 1
my_list.insert(i, float(new_num))
print(my_list)
<|reserved_special_token_1|>
my_list = [9, 9, 9, 8, 8, 7, 7, 6, 6, 5, 4, 4, 4, 2, 2, 1]
new_num = int(input('Enter a new number - '))
i = 0
for n in my_list:
if new_num <= n:
i += 1
my_list.insert(i, float(new_num))
print(my_list)
|
flexible
|
{
"blob_id": "be16e13c0e03952e45f98b175975795bba19cf9a",
"index": 2775,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in my_list:\n if new_num <= n:\n i += 1\nmy_list.insert(i, float(new_num))\nprint(my_list)\n",
"step-3": "my_list = [9, 9, 9, 8, 8, 7, 7, 6, 6, 5, 4, 4, 4, 2, 2, 1]\nnew_num = int(input('Enter a new number - '))\ni = 0\nfor n in my_list:\n if new_num <= n:\n i += 1\nmy_list.insert(i, float(new_num))\nprint(my_list)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
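
The loop in this record counts how many entries of the descending list are >= the new number and inserts at that index — a linear-time sorted insert. For ascending data the standard library covers this directly; a sketch using bisect (the list is the record's sample data reversed, since bisect requires ascending order):

import bisect

nums = [1, 2, 2, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9]  # ascending copy of the sample
bisect.insort(nums, 3)  # binary search for the slot, then a single insert
print(nums)  # [1, 2, 2, 3, 4, 4, 4, ...]
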
<|reserved_special_token_0|>
class TestRollingWindow(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestRollingWindow(unittest.TestCase):
"""Test class for _rolling_window."""
def setUp(self):
"""Prepare cube for tests."""
self.cube = _create_2d_cube()
def test_rolling_window_time(self):
"""Test rolling_window_statistics over time coordinate."""
cube_time_sum = rolling_window_statistics(self.cube, coordinate=
'time', operator='sum', window_length=2)
expected_data = np.broadcast_to(np.arange(3, 30, 2), (11, 14))
assert_equal(cube_time_sum.data, expected_data)
assert cube_time_sum.shape == (11, 14)
def test_rolling_window_latitude(self):
"""Test rolling_window_statistics over latitude coordinate."""
cube_lat_mean = rolling_window_statistics(self.cube, coordinate=
'latitude', operator='mean', window_length=3)
expected_data = np.broadcast_to(np.arange(1, 16), (9, 15))
assert_equal(cube_lat_mean.data, expected_data)
assert cube_lat_mean.shape == (9, 15)
def test_rolling_window_coord(self):
self.cube.remove_coord('latitude')
with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
rolling_window_statistics(self.cube, coordinate='latitude',
operator='mean', window_length=3)
def test_rolling_window_operator(self):
with self.assertRaises(ValueError):
rolling_window_statistics(self.cube, coordinate='time',
operator='percentile', window_length=2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _create_2d_cube():
cube = Cube(np.broadcast_to(np.arange(1, 16), (11, 15)), var_name='tas',
units='K')
cube.add_dim_coord(iris.coords.DimCoord(np.arange(-5, 6), standard_name
='latitude', units=Unit('degrees')), 0)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(1, 16), standard_name
='time', units=Unit('days since 1950-01-01 00:00:00', calendar=
'gregorian')), 1)
return cube
class TestRollingWindow(unittest.TestCase):
"""Test class for _rolling_window."""
def setUp(self):
"""Prepare cube for tests."""
self.cube = _create_2d_cube()
def test_rolling_window_time(self):
"""Test rolling_window_statistics over time coordinate."""
cube_time_sum = rolling_window_statistics(self.cube, coordinate=
'time', operator='sum', window_length=2)
expected_data = np.broadcast_to(np.arange(3, 30, 2), (11, 14))
assert_equal(cube_time_sum.data, expected_data)
assert cube_time_sum.shape == (11, 14)
def test_rolling_window_latitude(self):
"""Test rolling_window_statistics over latitude coordinate."""
cube_lat_mean = rolling_window_statistics(self.cube, coordinate=
'latitude', operator='mean', window_length=3)
expected_data = np.broadcast_to(np.arange(1, 16), (9, 15))
assert_equal(cube_lat_mean.data, expected_data)
assert cube_lat_mean.shape == (9, 15)
def test_rolling_window_coord(self):
self.cube.remove_coord('latitude')
with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
rolling_window_statistics(self.cube, coordinate='latitude',
operator='mean', window_length=3)
def test_rolling_window_operator(self):
with self.assertRaises(ValueError):
rolling_window_statistics(self.cube, coordinate='time',
operator='percentile', window_length=2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _create_2d_cube():
cube = Cube(np.broadcast_to(np.arange(1, 16), (11, 15)), var_name='tas',
units='K')
cube.add_dim_coord(iris.coords.DimCoord(np.arange(-5, 6), standard_name
='latitude', units=Unit('degrees')), 0)
cube.add_dim_coord(iris.coords.DimCoord(np.arange(1, 16), standard_name
='time', units=Unit('days since 1950-01-01 00:00:00', calendar=
'gregorian')), 1)
return cube
class TestRollingWindow(unittest.TestCase):
"""Test class for _rolling_window."""
def setUp(self):
"""Prepare cube for tests."""
self.cube = _create_2d_cube()
def test_rolling_window_time(self):
"""Test rolling_window_statistics over time coordinate."""
cube_time_sum = rolling_window_statistics(self.cube, coordinate=
'time', operator='sum', window_length=2)
expected_data = np.broadcast_to(np.arange(3, 30, 2), (11, 14))
assert_equal(cube_time_sum.data, expected_data)
assert cube_time_sum.shape == (11, 14)
def test_rolling_window_latitude(self):
"""Test rolling_window_statistics over latitude coordinate."""
cube_lat_mean = rolling_window_statistics(self.cube, coordinate=
'latitude', operator='mean', window_length=3)
expected_data = np.broadcast_to(np.arange(1, 16), (9, 15))
assert_equal(cube_lat_mean.data, expected_data)
assert cube_lat_mean.shape == (9, 15)
def test_rolling_window_coord(self):
self.cube.remove_coord('latitude')
with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
rolling_window_statistics(self.cube, coordinate='latitude',
operator='mean', window_length=3)
def test_rolling_window_operator(self):
with self.assertRaises(ValueError):
rolling_window_statistics(self.cube, coordinate='time',
operator='percentile', window_length=2)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
"""Unit tests for the `esmvalcore.preprocessor._rolling_window` function."""
import unittest
import iris.coords
import iris.exceptions
import numpy as np
from cf_units import Unit
from iris.cube import Cube
from numpy.testing import assert_equal
from esmvalcore.preprocessor._rolling_window import rolling_window_statistics
def _create_2d_cube():
cube = Cube(np.broadcast_to(np.arange(1, 16), (11, 15)),
var_name='tas',
units='K')
cube.add_dim_coord(
iris.coords.DimCoord(
np.arange(-5, 6),
standard_name='latitude',
units=Unit('degrees'),
), 0)
cube.add_dim_coord(
iris.coords.DimCoord(
np.arange(1, 16),
standard_name='time',
units=Unit('days since 1950-01-01 00:00:00', calendar='gregorian'),
), 1)
return cube
class TestRollingWindow(unittest.TestCase):
"""Test class for _rolling_window."""
def setUp(self):
"""Prepare cube for tests."""
self.cube = _create_2d_cube()
def test_rolling_window_time(self):
"""Test rolling_window_statistics over time coordinate."""
cube_time_sum = rolling_window_statistics(self.cube,
coordinate='time',
operator='sum',
window_length=2)
expected_data = np.broadcast_to(np.arange(3, 30, 2), (11, 14))
assert_equal(cube_time_sum.data, expected_data)
assert cube_time_sum.shape == (11, 14)
def test_rolling_window_latitude(self):
"""Test rolling_window_statistics over latitude coordinate."""
cube_lat_mean = rolling_window_statistics(self.cube,
coordinate='latitude',
operator='mean',
window_length=3)
expected_data = np.broadcast_to(np.arange(1, 16), (9, 15))
assert_equal(cube_lat_mean.data, expected_data)
assert cube_lat_mean.shape == (9, 15)
def test_rolling_window_coord(self):
self.cube.remove_coord('latitude')
with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
rolling_window_statistics(self.cube,
coordinate='latitude',
operator='mean',
window_length=3)
def test_rolling_window_operator(self):
with self.assertRaises(ValueError):
rolling_window_statistics(self.cube,
coordinate='time',
operator='percentile',
window_length=2)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "9539d2a4da87af1ff90b83bbcf72dfc8ab7b6db0",
"index": 5501,
"step-1": "<mask token>\n\n\nclass TestRollingWindow(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestRollingWindow(unittest.TestCase):\n \"\"\"Test class for _rolling_window.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare cube for tests.\"\"\"\n self.cube = _create_2d_cube()\n\n def test_rolling_window_time(self):\n \"\"\"Test rolling_window_statistics over time coordinate.\"\"\"\n cube_time_sum = rolling_window_statistics(self.cube, coordinate=\n 'time', operator='sum', window_length=2)\n expected_data = np.broadcast_to(np.arange(3, 30, 2), (11, 14))\n assert_equal(cube_time_sum.data, expected_data)\n assert cube_time_sum.shape == (11, 14)\n\n def test_rolling_window_latitude(self):\n \"\"\"Test rolling_window_statistics over latitude coordinate.\"\"\"\n cube_lat_mean = rolling_window_statistics(self.cube, coordinate=\n 'latitude', operator='mean', window_length=3)\n expected_data = np.broadcast_to(np.arange(1, 16), (9, 15))\n assert_equal(cube_lat_mean.data, expected_data)\n assert cube_lat_mean.shape == (9, 15)\n\n def test_rolling_window_coord(self):\n self.cube.remove_coord('latitude')\n with self.assertRaises(iris.exceptions.CoordinateNotFoundError):\n rolling_window_statistics(self.cube, coordinate='latitude',\n operator='mean', window_length=3)\n\n def test_rolling_window_operator(self):\n with self.assertRaises(ValueError):\n rolling_window_statistics(self.cube, coordinate='time',\n operator='percentile', window_length=2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _create_2d_cube():\n cube = Cube(np.broadcast_to(np.arange(1, 16), (11, 15)), var_name='tas',\n units='K')\n cube.add_dim_coord(iris.coords.DimCoord(np.arange(-5, 6), standard_name\n ='latitude', units=Unit('degrees')), 0)\n cube.add_dim_coord(iris.coords.DimCoord(np.arange(1, 16), standard_name\n ='time', units=Unit('days since 1950-01-01 00:00:00', calendar=\n 'gregorian')), 1)\n return cube\n\n\nclass TestRollingWindow(unittest.TestCase):\n \"\"\"Test class for _rolling_window.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare cube for tests.\"\"\"\n self.cube = _create_2d_cube()\n\n def test_rolling_window_time(self):\n \"\"\"Test rolling_window_statistics over time coordinate.\"\"\"\n cube_time_sum = rolling_window_statistics(self.cube, coordinate=\n 'time', operator='sum', window_length=2)\n expected_data = np.broadcast_to(np.arange(3, 30, 2), (11, 14))\n assert_equal(cube_time_sum.data, expected_data)\n assert cube_time_sum.shape == (11, 14)\n\n def test_rolling_window_latitude(self):\n \"\"\"Test rolling_window_statistics over latitude coordinate.\"\"\"\n cube_lat_mean = rolling_window_statistics(self.cube, coordinate=\n 'latitude', operator='mean', window_length=3)\n expected_data = np.broadcast_to(np.arange(1, 16), (9, 15))\n assert_equal(cube_lat_mean.data, expected_data)\n assert cube_lat_mean.shape == (9, 15)\n\n def test_rolling_window_coord(self):\n self.cube.remove_coord('latitude')\n with self.assertRaises(iris.exceptions.CoordinateNotFoundError):\n rolling_window_statistics(self.cube, coordinate='latitude',\n operator='mean', window_length=3)\n\n def test_rolling_window_operator(self):\n with self.assertRaises(ValueError):\n rolling_window_statistics(self.cube, coordinate='time',\n operator='percentile', window_length=2)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef _create_2d_cube():\n cube = Cube(np.broadcast_to(np.arange(1, 16), (11, 15)), var_name='tas',\n units='K')\n cube.add_dim_coord(iris.coords.DimCoord(np.arange(-5, 6), standard_name\n ='latitude', units=Unit('degrees')), 0)\n cube.add_dim_coord(iris.coords.DimCoord(np.arange(1, 16), standard_name\n ='time', units=Unit('days since 1950-01-01 00:00:00', calendar=\n 'gregorian')), 1)\n return cube\n\n\nclass TestRollingWindow(unittest.TestCase):\n \"\"\"Test class for _rolling_window.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare cube for tests.\"\"\"\n self.cube = _create_2d_cube()\n\n def test_rolling_window_time(self):\n \"\"\"Test rolling_window_statistics over time coordinate.\"\"\"\n cube_time_sum = rolling_window_statistics(self.cube, coordinate=\n 'time', operator='sum', window_length=2)\n expected_data = np.broadcast_to(np.arange(3, 30, 2), (11, 14))\n assert_equal(cube_time_sum.data, expected_data)\n assert cube_time_sum.shape == (11, 14)\n\n def test_rolling_window_latitude(self):\n \"\"\"Test rolling_window_statistics over latitude coordinate.\"\"\"\n cube_lat_mean = rolling_window_statistics(self.cube, coordinate=\n 'latitude', operator='mean', window_length=3)\n expected_data = np.broadcast_to(np.arange(1, 16), (9, 15))\n assert_equal(cube_lat_mean.data, expected_data)\n assert cube_lat_mean.shape == (9, 15)\n\n def test_rolling_window_coord(self):\n self.cube.remove_coord('latitude')\n with self.assertRaises(iris.exceptions.CoordinateNotFoundError):\n rolling_window_statistics(self.cube, coordinate='latitude',\n operator='mean', window_length=3)\n\n def test_rolling_window_operator(self):\n with self.assertRaises(ValueError):\n rolling_window_statistics(self.cube, coordinate='time',\n operator='percentile', window_length=2)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "\"\"\"Unit tests for the `esmvalcore.preprocessor._rolling_window` function.\"\"\"\nimport unittest\n\nimport iris.coords\nimport iris.exceptions\nimport numpy as np\nfrom cf_units import Unit\nfrom iris.cube import Cube\nfrom numpy.testing import assert_equal\n\nfrom esmvalcore.preprocessor._rolling_window import rolling_window_statistics\n\n\ndef _create_2d_cube():\n\n cube = Cube(np.broadcast_to(np.arange(1, 16), (11, 15)),\n var_name='tas',\n units='K')\n cube.add_dim_coord(\n iris.coords.DimCoord(\n np.arange(-5, 6),\n standard_name='latitude',\n units=Unit('degrees'),\n ), 0)\n cube.add_dim_coord(\n iris.coords.DimCoord(\n np.arange(1, 16),\n standard_name='time',\n units=Unit('days since 1950-01-01 00:00:00', calendar='gregorian'),\n ), 1)\n\n return cube\n\n\nclass TestRollingWindow(unittest.TestCase):\n \"\"\"Test class for _rolling_window.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare cube for tests.\"\"\"\n self.cube = _create_2d_cube()\n\n def test_rolling_window_time(self):\n \"\"\"Test rolling_window_statistics over time coordinate.\"\"\"\n cube_time_sum = rolling_window_statistics(self.cube,\n coordinate='time',\n operator='sum',\n window_length=2)\n expected_data = np.broadcast_to(np.arange(3, 30, 2), (11, 14))\n assert_equal(cube_time_sum.data, expected_data)\n assert cube_time_sum.shape == (11, 14)\n\n def test_rolling_window_latitude(self):\n \"\"\"Test rolling_window_statistics over latitude coordinate.\"\"\"\n cube_lat_mean = rolling_window_statistics(self.cube,\n coordinate='latitude',\n operator='mean',\n window_length=3)\n expected_data = np.broadcast_to(np.arange(1, 16), (9, 15))\n assert_equal(cube_lat_mean.data, expected_data)\n assert cube_lat_mean.shape == (9, 15)\n\n def test_rolling_window_coord(self):\n self.cube.remove_coord('latitude')\n with self.assertRaises(iris.exceptions.CoordinateNotFoundError):\n rolling_window_statistics(self.cube,\n coordinate='latitude',\n operator='mean',\n window_length=3)\n\n def test_rolling_window_operator(self):\n with self.assertRaises(ValueError):\n rolling_window_statistics(self.cube,\n coordinate='time',\n operator='percentile',\n window_length=2)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
1,
7,
8,
9,
11
]
}
|
[
1,
7,
8,
9,
11
] |
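
The tests in this record treat rolling_window_statistics as a black box; iris cubes expose the underlying operation directly. A minimal sketch of that primitive, assuming the wrapper's 'sum' operator maps to iris.analysis.SUM:

import numpy as np
import iris.analysis
from iris.coords import DimCoord
from iris.cube import Cube

cube = Cube(np.arange(1.0, 16.0), var_name='tas')
cube.add_dim_coord(DimCoord(np.arange(15.0), long_name='time'), 0)

# Cube.rolling_window(coord, aggregator, window) slides a length-2 window
# along time and aggregates, leaving 14 points.
summed = cube.rolling_window('time', iris.analysis.SUM, 2)
print(summed.data)  # pairwise sums: [3. 5. 7. ... 29.]
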
#https://www.youtube.com/watch?v=CQ5kc_j4RjA
import pandas as pd
#import quandl
import math, datetime
import time
import numpy as np
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import cross_validation, preprocessing, svm
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib import style
style.use ('ggplot')
import datetime
from pandas_datareader import data
import csv
#Setting Companies
def Set_Ticker():
global stockTicker
stockTicker = 'ONGC.NS'
## stockTicker = input("Enter the Ticker: ")
print ("Possible options: ONGC.NS, ")
return
def Set_Date():
#Setting Date
global end_date
global start_date
## end_date = input("Enter prediction date(YYYY-MM-DD):")
end_date = datetime.datetime(2017,1,30)
start_date = end_date
print (end_date)
return
def Actual_Value():
#Actual Value
global df
print("The Actual Closing Value is Displayed below")
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao=df['Close']
print (str(ao))
return
def Add_Features_x():
#Create Features - X
global df
df ['OC_Change'] = (df['Close']-df['Open']/df['Open']*100)
df ['HL_Change'] = (df['High']-df['Low']/df['Low']*100)
df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
return
def Forcast_Values():
#Forecast
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01*len(df)))
return
def Add_Features_y():
#Label - y
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
return
def Setup_Validate_data():
#Set X and y
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'],1))
y = np.array(df['label'])
#Split Training and Testing Data
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)
return
def Set_Model():
#Set Model for ML
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
#Accuracy of Test Data
global accuracy
accuracy = clf.score(X_test, y_test)
return()
def Prediction():
#Predict Next Values
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
def Data_frame_Create():
#Creat a DataFrame
global df
df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
## df.plot(kind="box", subplots=True, layout=(1,6), sharex=False, sharey=False)
## plt.show()
## df.hist()
## plt.show()
## scatter_matrix(df)
## plt.show()
return
Set_Ticker()
Actual_Value()
#Setting Date
Set_Date()
#Gap of 1 month in time
#n = int(input("Enter the No. of Years in Months:"))
start_date += datetime.timedelta(weeks=-100)
#Creat a DataFrame
Data_frame_Create()
#Create Features - X
Add_Features_x()
#Forecast
Forcast_Values()
#Label - y
Add_Features_y()
#Split Training and Testing Data
Setup_Validate_data()
#Set Model for ML
Set_Model()
#Accuracy of Test Data
get_Accuracy()
#Predict Next Values
Prediction()
print (stockTicker.partition('.')[0])
##print ("Start Date:" + str(start_date))
print ("Accuracy: " + str(accuracy*100))
print ("Next day value: "+ str(forecast_set[0]))
print (forecast_set)
print ("3rd day value: "+ str(forecast_set[1]))
print ("5th day value: "+ str(forecast_set[2]))
print ("7th day value: "+ str(forecast_set[3]))
print ("10th day value: "+ str(forecast_set[4]))
##dict = {'Next Day':forecast_set[0],'3rd Day':forecast_set[1],'5th Day':forecast_set[2]}
##print (dict)
somedict = dict(NextDay=forecast_set[0],ThirdDay=forecast_set[1],FifthDay=forecast_set[2])
with open('mycsvfile.csv','wb') as f:
w = csv.writer(f)
w.writerows(somedict.items())
|
normal
|
{
"blob_id": "9c4676edbeef3748a4947f827fefa29e95674bfa",
"index": 121,
"step-1": "<mask token>\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\n<mask token>\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\n<mask token>\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\n<mask token>\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\n<mask token>\n",
"step-2": "<mask token>\nstyle.use('ggplot')\n<mask token>\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\n<mask token>\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n",
"step-3": "<mask token>\nstyle.use('ggplot')\n<mask token>\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\nsomedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay\n =forecast_set[2])\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n",
"step-4": "import pandas as pd\nimport math, datetime\nimport time\nimport numpy as np\nfrom pandas.tools.plotting import scatter_matrix\nimport matplotlib.pyplot as plt\nfrom sklearn import cross_validation, preprocessing, svm\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom matplotlib import style\nstyle.use('ggplot')\nimport datetime\nfrom pandas_datareader import data\nimport csv\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\nsomedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay\n =forecast_set[2])\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n",
"step-5": "#https://www.youtube.com/watch?v=CQ5kc_j4RjA\r\n\r\nimport pandas as pd\r\n#import quandl\r\nimport math, datetime\r\nimport time\r\nimport numpy as np\r\nfrom pandas.tools.plotting import scatter_matrix\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import cross_validation, preprocessing, svm\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.svm import SVC\r\nfrom matplotlib import style\r\nstyle.use ('ggplot')\r\nimport datetime\r\nfrom pandas_datareader import data\r\nimport csv\r\n\r\n\r\n#Setting Companies\r\ndef Set_Ticker():\r\n global stockTicker\r\n stockTicker = 'ONGC.NS'\r\n## stockTicker = input(\"Enter the Ticker: \")\r\n print (\"Possible options: ONGC.NS, \")\r\n return \r\n\r\ndef Set_Date():\r\n #Setting Date\r\n global end_date\r\n global start_date\r\n## end_date = input(\"Enter prediction date(YYYY-MM-DD):\")\r\n end_date = datetime.datetime(2017,1,30)\r\n start_date = end_date\r\n print (end_date)\r\n return\r\n\r\n\r\ndef Actual_Value():\r\n #Actual Value\r\n global df\r\n print(\"The Actual Closing Value is Displayed below\")\r\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\r\n ao=df['Close']\r\n print (str(ao))\r\n return\r\n\r\n\r\ndef Add_Features_x():\r\n #Create Features - X\r\n global df\r\n df ['OC_Change'] = (df['Close']-df['Open']/df['Open']*100)\r\n df ['HL_Change'] = (df['High']-df['Low']/df['Low']*100)\r\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\r\n return\r\n\r\ndef Forcast_Values():\r\n #Forecast\r\n global forecast_out\r\n global forecast_col\r\n forecast_col = 'Close'\r\n forecast_out = int(math.ceil(0.01*len(df)))\r\n return\r\n\r\ndef Add_Features_y():\r\n #Label - y\r\n df['label'] = df[forecast_col].shift(-forecast_out)\r\n df.dropna(inplace=True)\r\n return\r\n\r\ndef Setup_Validate_data():\r\n #Set X and y \r\n global y\r\n global X\r\n global X_train, X_test, y_train, y_test\r\n X = np.array(df.drop(['label'],1))\r\n y = np.array(df['label'])\r\n #Split Training and Testing Data\r\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)\r\n return\r\n\r\ndef Set_Model():\r\n #Set Model for ML\r\n global clf\r\n clf = LinearRegression()\r\n clf.fit(X_train, y_train)\r\n return\r\n\r\ndef get_Accuracy():\r\n #Accuracy of Test Data\r\n global accuracy\r\n accuracy = clf.score(X_test, y_test)\r\n return()\r\n\r\ndef Prediction():\r\n #Predict Next Values\r\n global X\r\n X = X[:-forecast_out]\r\n global X_lately\r\n global forecast_set\r\n X_lately = X[-forecast_out:]\r\n forecast_set = clf.predict(X_lately)\r\n\r\ndef Data_frame_Create():\r\n #Creat a DataFrame \r\n global df\r\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\r\n## df.plot(kind=\"box\", subplots=True, layout=(1,6), sharex=False, sharey=False)\r\n## plt.show()\r\n## df.hist()\r\n## plt.show()\r\n## scatter_matrix(df)\r\n## plt.show()\r\n return\r\n\r\n\r\nSet_Ticker()\r\nActual_Value()\r\n\r\n#Setting Date\r\nSet_Date()\r\n\r\n#Gap of 1 month in time\r\n#n = int(input(\"Enter the No. 
of Years in Months:\"))\r\nstart_date += datetime.timedelta(weeks=-100)\r\n\r\n#Creat a DataFrame\r\nData_frame_Create() \r\n\r\n#Create Features - X\r\nAdd_Features_x()\r\n\r\n#Forecast\r\nForcast_Values()\r\n\r\n#Label - y\r\nAdd_Features_y()\r\n\r\n#Split Training and Testing Data\r\nSetup_Validate_data()\r\n\r\n#Set Model for ML\r\nSet_Model()\r\n\r\n#Accuracy of Test Data\r\nget_Accuracy()\r\n\r\n#Predict Next Values\r\nPrediction()\r\n \r\nprint (stockTicker.partition('.')[0])\r\n##print (\"Start Date:\" + str(start_date))\r\nprint (\"Accuracy: \" + str(accuracy*100))\r\nprint (\"Next day value: \"+ str(forecast_set[0]))\r\nprint (forecast_set)\r\nprint (\"3rd day value: \"+ str(forecast_set[1]))\r\nprint (\"5th day value: \"+ str(forecast_set[2]))\r\nprint (\"7th day value: \"+ str(forecast_set[3]))\r\nprint (\"10th day value: \"+ str(forecast_set[4]))\r\n\r\n##dict = {'Next Day':forecast_set[0],'3rd Day':forecast_set[1],'5th Day':forecast_set[2]}\r\n##print (dict)\r\n\r\nsomedict = dict(NextDay=forecast_set[0],ThirdDay=forecast_set[1],FifthDay=forecast_set[2])\r\n\r\nwith open('mycsvfile.csv','wb') as f:\r\n w = csv.writer(f)\r\n w.writerows(somedict.items())\r\n",
"step-ids": [
5,
12,
13,
14,
15
]
}
|
[
5,
12,
13,
14,
15
] |
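
Two details in the script above deserve a fix: operator precedence makes OC_Change evaluate as Close - (Open/Open*100), i.e. Close - 100, rather than a percent change (same for HL_Change), and csv.writer needs a text-mode file in Python 3, so opening with 'wb' raises a TypeError. A self-contained sketch of the corrected pieces, with placeholder data standing in for the script's DataFrame and forecasts:

import csv
import pandas as pd

df = pd.DataFrame({'Open': [100.0], 'High': [105.0], 'Low': [99.0], 'Close': [104.0]})
# Explicit parentheses give the intended percentage changes.
df['OC_Change'] = (df['Close'] - df['Open']) / df['Open'] * 100   # 4.0
df['HL_Change'] = (df['High'] - df['Low']) / df['Low'] * 100      # ~6.06

somedict = dict(NextDay=104.2, ThirdDay=104.9, FifthDay=105.3)    # placeholder forecasts
with open('mycsvfile.csv', 'w', newline='') as f:                 # text mode for csv in Python 3
    csv.writer(f).writerows(somedict.items())
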
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 1 11:52:48 2022
@author: ccamargo
"""
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import os
# 1. get filelist
path = "/Volumes/LaCie_NIOZ/data/steric/data/"
path_to_original_files = path + "original/"
flist = [file for file in os.listdir(path_to_original_files) if file.endswith(".nc")]
path_to_regrided_files = path + "regrid_180x360/"
#%% 2. Regrid:
# for file in flist:
# fin=path_to_original_files+file
# fout=path_to_regrided_files+file
# command_list=str('cdo -L remapbil,r360x180 '+fin+' '+fout)
# _tmp=os.system(command_list)
#%% landmask
ds = xr.open_dataset("/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc")
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.landmask)
ds = xr.open_dataset(
"/Volumes/LaCie_NIOZ/data/barystatic/masks/"
+ "LAND_MASK_CRI-JPL_180x360_conservative.nc"
)
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.mask)
mask[mask == 1] = np.nan
mask[mask == 0] = 1
# %% 3. get data
flist = [file for file in os.listdir(path_to_regrided_files) if file.endswith(".nc")]
datasets = []
for file in flist:
print(file)
name = file.split(".nc")[0]
ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)
timespan = [ds.timespan]
print(timespan)
ti, tf = timespan[0].split(" to ")
yf = int(tf.split("-")[0])
mf = int(tf.split("-")[1])
if mf == 12:
yf = yf + 1
mf = "01"
else:
mf = mf + 1
tf = "{}-{}-28".format(yf, str(mf).zfill(2))
if name == "Ishii":
ti = "1990-01-31T00:00:00.000000"
tf = "2019-01-31T00:00:00.000000"
print("correct time: {} to {}".format(ti, tf))
# tf = '{}-{}-{}'.format(time[-1].year,str(time[-1].month).zfill(2),time[-1].day +15)
time = np.arange(ti, tf, dtype="datetime64[M]")
ds["time"] = np.array(time)
da = ds["data"].rename("sla_" + name)
da.data = da.data * mask
da.data = da.data - np.array(
da.sel(time=slice("2005-01-01", "2016-01-01")).mean(dim="time")
)
datasets.append(da)
# print(da)
#%% merge datasets
ds = xr.merge(datasets)
#% % select since 1993
ds = ds.sel(time=slice("1993-01-01", ds.time[-1]))
#% % compute ENS mean
var = [
key
for key in ds.variables
if key.split("_")[0] == "sla" and len(key.split("_")) == 2
]
data = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))
data.fill(np.nan)
names = [v.split("_")[-1] for v in var]
for i, v in enumerate(var):
data[i] = np.array(ds[v])
da = xr.Dataset(
data_vars={"data": (("names", "time", "lat", "lon"), data)},
coords={"lat": ds.lat, "lon": ds.lon, "time": ds.time, "names": names},
)
# ds['sla_ens'] = (['time','lat','lon'],np.nanmean(datamu,axis=0))
ds["sla_ens"] = da.data.mean(dim="names")
ens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))
ens.fill(np.nan)
ens[0] = np.array(ds.sla_ens)
data2 = np.vstack([data, ens])
names.append("ENS")
ds = ds.assign_coords({"names": names})
ds["SLA"] = (["names", "time", "lat", "lon"], data2)
ds.attrs["units"] = "meters"
ds.attrs["description"] = "Steric sea-level height (m)"
ds.attrs["time_mean"] = "Removed time mean from 2005-2015 (full years)"
ds.attrs["script"] = "SLB-steric.py"
#%% save
path_save = "/Volumes/LaCie_NIOZ/data/budget/"
ds.to_netcdf(path_save + "steric_upper.nc")
|
normal
|
{
"blob_id": "4fc4bb81d47a33e4669df46033033fddeca6544e",
"index": 8858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor file in flist:\n print(file)\n name = file.split('.nc')[0]\n ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)\n timespan = [ds.timespan]\n print(timespan)\n ti, tf = timespan[0].split(' to ')\n yf = int(tf.split('-')[0])\n mf = int(tf.split('-')[1])\n if mf == 12:\n yf = yf + 1\n mf = '01'\n else:\n mf = mf + 1\n tf = '{}-{}-28'.format(yf, str(mf).zfill(2))\n if name == 'Ishii':\n ti = '1990-01-31T00:00:00.000000'\n tf = '2019-01-31T00:00:00.000000'\n print('correct time: {} to {}'.format(ti, tf))\n time = np.arange(ti, tf, dtype='datetime64[M]')\n ds['time'] = np.array(time)\n da = ds['data'].rename('sla_' + name)\n da.data = da.data * mask\n da.data = da.data - np.array(da.sel(time=slice('2005-01-01',\n '2016-01-01')).mean(dim='time'))\n datasets.append(da)\n<mask token>\ndata.fill(np.nan)\n<mask token>\nfor i, v in enumerate(var):\n data[i] = np.array(ds[v])\n<mask token>\nens.fill(np.nan)\n<mask token>\nnames.append('ENS')\n<mask token>\nds.to_netcdf(path_save + 'steric_upper.nc')\n",
"step-3": "<mask token>\npath = '/Volumes/LaCie_NIOZ/data/steric/data/'\npath_to_original_files = path + 'original/'\nflist = [file for file in os.listdir(path_to_original_files) if file.\n endswith('.nc')]\npath_to_regrided_files = path + 'regrid_180x360/'\nds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc')\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.landmask)\nds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/' +\n 'LAND_MASK_CRI-JPL_180x360_conservative.nc')\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.mask)\nmask[mask == 1] = np.nan\nmask[mask == 0] = 1\nflist = [file for file in os.listdir(path_to_regrided_files) if file.\n endswith('.nc')]\ndatasets = []\nfor file in flist:\n print(file)\n name = file.split('.nc')[0]\n ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)\n timespan = [ds.timespan]\n print(timespan)\n ti, tf = timespan[0].split(' to ')\n yf = int(tf.split('-')[0])\n mf = int(tf.split('-')[1])\n if mf == 12:\n yf = yf + 1\n mf = '01'\n else:\n mf = mf + 1\n tf = '{}-{}-28'.format(yf, str(mf).zfill(2))\n if name == 'Ishii':\n ti = '1990-01-31T00:00:00.000000'\n tf = '2019-01-31T00:00:00.000000'\n print('correct time: {} to {}'.format(ti, tf))\n time = np.arange(ti, tf, dtype='datetime64[M]')\n ds['time'] = np.array(time)\n da = ds['data'].rename('sla_' + name)\n da.data = da.data * mask\n da.data = da.data - np.array(da.sel(time=slice('2005-01-01',\n '2016-01-01')).mean(dim='time'))\n datasets.append(da)\nds = xr.merge(datasets)\nds = ds.sel(time=slice('1993-01-01', ds.time[-1]))\nvar = [key for key in ds.variables if key.split('_')[0] == 'sla' and len(\n key.split('_')) == 2]\ndata = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))\ndata.fill(np.nan)\nnames = [v.split('_')[-1] for v in var]\nfor i, v in enumerate(var):\n data[i] = np.array(ds[v])\nda = xr.Dataset(data_vars={'data': (('names', 'time', 'lat', 'lon'), data)},\n coords={'lat': ds.lat, 'lon': ds.lon, 'time': ds.time, 'names': names})\nds['sla_ens'] = da.data.mean(dim='names')\nens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))\nens.fill(np.nan)\nens[0] = np.array(ds.sla_ens)\ndata2 = np.vstack([data, ens])\nnames.append('ENS')\nds = ds.assign_coords({'names': names})\nds['SLA'] = ['names', 'time', 'lat', 'lon'], data2\nds.attrs['units'] = 'meters'\nds.attrs['description'] = 'Steric sea-level height (m)'\nds.attrs['time_mean'] = 'Removed time mean from 2005-2015 (full years)'\nds.attrs['script'] = 'SLB-steric.py'\npath_save = '/Volumes/LaCie_NIOZ/data/budget/'\nds.to_netcdf(path_save + 'steric_upper.nc')\n",
"step-4": "<mask token>\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\npath = '/Volumes/LaCie_NIOZ/data/steric/data/'\npath_to_original_files = path + 'original/'\nflist = [file for file in os.listdir(path_to_original_files) if file.\n endswith('.nc')]\npath_to_regrided_files = path + 'regrid_180x360/'\nds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc')\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.landmask)\nds = xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/' +\n 'LAND_MASK_CRI-JPL_180x360_conservative.nc')\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.mask)\nmask[mask == 1] = np.nan\nmask[mask == 0] = 1\nflist = [file for file in os.listdir(path_to_regrided_files) if file.\n endswith('.nc')]\ndatasets = []\nfor file in flist:\n print(file)\n name = file.split('.nc')[0]\n ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)\n timespan = [ds.timespan]\n print(timespan)\n ti, tf = timespan[0].split(' to ')\n yf = int(tf.split('-')[0])\n mf = int(tf.split('-')[1])\n if mf == 12:\n yf = yf + 1\n mf = '01'\n else:\n mf = mf + 1\n tf = '{}-{}-28'.format(yf, str(mf).zfill(2))\n if name == 'Ishii':\n ti = '1990-01-31T00:00:00.000000'\n tf = '2019-01-31T00:00:00.000000'\n print('correct time: {} to {}'.format(ti, tf))\n time = np.arange(ti, tf, dtype='datetime64[M]')\n ds['time'] = np.array(time)\n da = ds['data'].rename('sla_' + name)\n da.data = da.data * mask\n da.data = da.data - np.array(da.sel(time=slice('2005-01-01',\n '2016-01-01')).mean(dim='time'))\n datasets.append(da)\nds = xr.merge(datasets)\nds = ds.sel(time=slice('1993-01-01', ds.time[-1]))\nvar = [key for key in ds.variables if key.split('_')[0] == 'sla' and len(\n key.split('_')) == 2]\ndata = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))\ndata.fill(np.nan)\nnames = [v.split('_')[-1] for v in var]\nfor i, v in enumerate(var):\n data[i] = np.array(ds[v])\nda = xr.Dataset(data_vars={'data': (('names', 'time', 'lat', 'lon'), data)},\n coords={'lat': ds.lat, 'lon': ds.lon, 'time': ds.time, 'names': names})\nds['sla_ens'] = da.data.mean(dim='names')\nens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))\nens.fill(np.nan)\nens[0] = np.array(ds.sla_ens)\ndata2 = np.vstack([data, ens])\nnames.append('ENS')\nds = ds.assign_coords({'names': names})\nds['SLA'] = ['names', 'time', 'lat', 'lon'], data2\nds.attrs['units'] = 'meters'\nds.attrs['description'] = 'Steric sea-level height (m)'\nds.attrs['time_mean'] = 'Removed time mean from 2005-2015 (full years)'\nds.attrs['script'] = 'SLB-steric.py'\npath_save = '/Volumes/LaCie_NIOZ/data/budget/'\nds.to_netcdf(path_save + 'steric_upper.nc')\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 1 11:52:48 2022\n\n@author: ccamargo\n\"\"\"\n\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# 1. get filelist\npath = \"/Volumes/LaCie_NIOZ/data/steric/data/\"\npath_to_original_files = path + \"original/\"\nflist = [file for file in os.listdir(path_to_original_files) if file.endswith(\".nc\")]\n\npath_to_regrided_files = path + \"regrid_180x360/\"\n\n\n#%% 2. Regrid:\n# for file in flist:\n# fin=path_to_original_files+file\n# fout=path_to_regrided_files+file\n# command_list=str('cdo -L remapbil,r360x180 '+fin+' '+fout)\n# _tmp=os.system(command_list)\n#%% landmask\nds = xr.open_dataset(\"/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc\")\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.landmask)\n\nds = xr.open_dataset(\n \"/Volumes/LaCie_NIOZ/data/barystatic/masks/\"\n + \"LAND_MASK_CRI-JPL_180x360_conservative.nc\"\n)\nds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)\nmask = np.array(ds.mask)\nmask[mask == 1] = np.nan\nmask[mask == 0] = 1\n# %% 3. get data\nflist = [file for file in os.listdir(path_to_regrided_files) if file.endswith(\".nc\")]\ndatasets = []\nfor file in flist:\n print(file)\n name = file.split(\".nc\")[0]\n ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)\n timespan = [ds.timespan]\n print(timespan)\n ti, tf = timespan[0].split(\" to \")\n yf = int(tf.split(\"-\")[0])\n mf = int(tf.split(\"-\")[1])\n if mf == 12:\n yf = yf + 1\n mf = \"01\"\n else:\n mf = mf + 1\n tf = \"{}-{}-28\".format(yf, str(mf).zfill(2))\n if name == \"Ishii\":\n ti = \"1990-01-31T00:00:00.000000\"\n tf = \"2019-01-31T00:00:00.000000\"\n print(\"correct time: {} to {}\".format(ti, tf))\n # tf = '{}-{}-{}'.format(time[-1].year,str(time[-1].month).zfill(2),time[-1].day +15)\n time = np.arange(ti, tf, dtype=\"datetime64[M]\")\n ds[\"time\"] = np.array(time)\n\n da = ds[\"data\"].rename(\"sla_\" + name)\n da.data = da.data * mask\n da.data = da.data - np.array(\n da.sel(time=slice(\"2005-01-01\", \"2016-01-01\")).mean(dim=\"time\")\n )\n datasets.append(da)\n # print(da)\n#%% merge datasets\nds = xr.merge(datasets)\n#% % select since 1993\nds = ds.sel(time=slice(\"1993-01-01\", ds.time[-1]))\n#% % compute ENS mean\nvar = [\n key\n for key in ds.variables\n if key.split(\"_\")[0] == \"sla\" and len(key.split(\"_\")) == 2\n]\ndata = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))\ndata.fill(np.nan)\nnames = [v.split(\"_\")[-1] for v in var]\nfor i, v in enumerate(var):\n data[i] = np.array(ds[v])\nda = xr.Dataset(\n data_vars={\"data\": ((\"names\", \"time\", \"lat\", \"lon\"), data)},\n coords={\"lat\": ds.lat, \"lon\": ds.lon, \"time\": ds.time, \"names\": names},\n)\n\n# ds['sla_ens'] = (['time','lat','lon'],np.nanmean(datamu,axis=0))\nds[\"sla_ens\"] = da.data.mean(dim=\"names\")\nens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))\nens.fill(np.nan)\nens[0] = np.array(ds.sla_ens)\ndata2 = np.vstack([data, ens])\nnames.append(\"ENS\")\nds = ds.assign_coords({\"names\": names})\nds[\"SLA\"] = ([\"names\", \"time\", \"lat\", \"lon\"], data2)\n\nds.attrs[\"units\"] = \"meters\"\nds.attrs[\"description\"] = \"Steric sea-level height (m)\"\nds.attrs[\"time_mean\"] = \"Removed time mean from 2005-2015 (full years)\"\nds.attrs[\"script\"] = \"SLB-steric.py\"\n#%% save\npath_save = \"/Volumes/LaCie_NIOZ/data/budget/\"\nds.to_netcdf(path_save + \"steric_upper.nc\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
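
Every product in the script above is re-referenced to a common 2005-2015 baseline before the ensemble mean is formed. A minimal reproduction of that anomaly step on synthetic data (dimension names follow the script; the array values are random placeholders):

import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('1993-01-01', '2019-12-01', freq='MS')  # monthly steps
da = xr.DataArray(np.random.rand(len(time), 4, 4),
                  coords={'time': time, 'lat': np.arange(4), 'lon': np.arange(4)},
                  dims=('time', 'lat', 'lon'))

# Subtract the 2005-2015 time mean so all products share one reference period.
baseline = da.sel(time=slice('2005-01-01', '2016-01-01')).mean(dim='time')
anomaly = da - baseline
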
<|reserved_special_token_0|>
class AdminUrlUserPermission(permissions.BasePermission):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
class AuthorModeratorAdminOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
is_safe = request.method in permissions.SAFE_METHODS
is_auth = request.user.is_authenticated
return is_safe or is_auth
def has_object_permission(self, request, view, obj):
is_safe = request.method in permissions.SAFE_METHODS
is_author = obj.author == request.user
is_privileged = None
if request.user.is_authenticated:
is_privileged = request.user.role in ('moderator', 'admin')
return is_author or is_safe or is_privileged
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdminUrlUserPermission(permissions.BasePermission):
def has_permission(self, request, view):
return request.user.is_authenticated and (request.user.role ==
'admin' or request.user.is_superuser)
<|reserved_special_token_0|>
class ReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
class AuthorModeratorAdminOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
is_safe = request.method in permissions.SAFE_METHODS
is_auth = request.user.is_authenticated
return is_safe or is_auth
def has_object_permission(self, request, view, obj):
is_safe = request.method in permissions.SAFE_METHODS
is_author = obj.author == request.user
is_privileged = None
if request.user.is_authenticated:
is_privileged = request.user.role in ('moderator', 'admin')
return is_author or is_safe or is_privileged
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdminUrlUserPermission(permissions.BasePermission):
def has_permission(self, request, view):
return request.user.is_authenticated and (request.user.role ==
'admin' or request.user.is_superuser)
def has_object_permission(self, request, view, obj):
return request.user.role == 'admin' or request.user.is_superuser
class ReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
class AuthorModeratorAdminOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
is_safe = request.method in permissions.SAFE_METHODS
is_auth = request.user.is_authenticated
return is_safe or is_auth
def has_object_permission(self, request, view, obj):
is_safe = request.method in permissions.SAFE_METHODS
is_author = obj.author == request.user
is_privileged = None
if request.user.is_authenticated:
is_privileged = request.user.role in ('moderator', 'admin')
return is_author or is_safe or is_privileged
<|reserved_special_token_1|>
from rest_framework import permissions
class AdminUrlUserPermission(permissions.BasePermission):
def has_permission(self, request, view):
return request.user.is_authenticated and (request.user.role ==
'admin' or request.user.is_superuser)
def has_object_permission(self, request, view, obj):
return request.user.role == 'admin' or request.user.is_superuser
class ReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
class AuthorModeratorAdminOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
is_safe = request.method in permissions.SAFE_METHODS
is_auth = request.user.is_authenticated
return is_safe or is_auth
def has_object_permission(self, request, view, obj):
is_safe = request.method in permissions.SAFE_METHODS
is_author = obj.author == request.user
is_privileged = None
if request.user.is_authenticated:
is_privileged = request.user.role in ('moderator', 'admin')
return is_author or is_safe or is_privileged
<|reserved_special_token_1|>
from rest_framework import permissions
class AdminUrlUserPermission(permissions.BasePermission):
def has_permission(self, request, view):
return (request.user.is_authenticated
and (request.user.role == 'admin'
or request.user.is_superuser))
def has_object_permission(self, request, view, obj):
return (request.user.role == 'admin'
or request.user.is_superuser)
class ReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
class AuthorModeratorAdminOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
is_safe = request.method in permissions.SAFE_METHODS
is_auth = request.user.is_authenticated
return is_safe or is_auth
def has_object_permission(self, request, view, obj):
is_safe = request.method in permissions.SAFE_METHODS
is_author = obj.author == request.user
is_privileged = None
if request.user.is_authenticated:
is_privileged = request.user.role in ('moderator', 'admin')
return is_author or is_safe or is_privileged
|
flexible
|
{
"blob_id": "4549f26cf8051535f9d3486d111fc7afe7514dea",
"index": 5674,
"step-1": "<mask token>\n\n\nclass AdminUrlUserPermission(permissions.BasePermission):\n <mask token>\n <mask token>\n\n\nclass ReadOnly(permissions.BasePermission):\n\n def has_permission(self, request, view):\n return request.method in permissions.SAFE_METHODS\n\n\nclass AuthorModeratorAdminOrReadOnly(permissions.BasePermission):\n\n def has_permission(self, request, view):\n is_safe = request.method in permissions.SAFE_METHODS\n is_auth = request.user.is_authenticated\n return is_safe or is_auth\n\n def has_object_permission(self, request, view, obj):\n is_safe = request.method in permissions.SAFE_METHODS\n is_author = obj.author == request.user\n is_privileged = None\n if request.user.is_authenticated:\n is_privileged = request.user.role in ('moderator', 'admin')\n return is_author or is_safe or is_privileged\n",
"step-2": "<mask token>\n\n\nclass AdminUrlUserPermission(permissions.BasePermission):\n\n def has_permission(self, request, view):\n return request.user.is_authenticated and (request.user.role ==\n 'admin' or request.user.is_superuser)\n <mask token>\n\n\nclass ReadOnly(permissions.BasePermission):\n\n def has_permission(self, request, view):\n return request.method in permissions.SAFE_METHODS\n\n\nclass AuthorModeratorAdminOrReadOnly(permissions.BasePermission):\n\n def has_permission(self, request, view):\n is_safe = request.method in permissions.SAFE_METHODS\n is_auth = request.user.is_authenticated\n return is_safe or is_auth\n\n def has_object_permission(self, request, view, obj):\n is_safe = request.method in permissions.SAFE_METHODS\n is_author = obj.author == request.user\n is_privileged = None\n if request.user.is_authenticated:\n is_privileged = request.user.role in ('moderator', 'admin')\n return is_author or is_safe or is_privileged\n",
"step-3": "<mask token>\n\n\nclass AdminUrlUserPermission(permissions.BasePermission):\n\n def has_permission(self, request, view):\n return request.user.is_authenticated and (request.user.role ==\n 'admin' or request.user.is_superuser)\n\n def has_object_permission(self, request, view, obj):\n return request.user.role == 'admin' or request.user.is_superuser\n\n\nclass ReadOnly(permissions.BasePermission):\n\n def has_permission(self, request, view):\n return request.method in permissions.SAFE_METHODS\n\n\nclass AuthorModeratorAdminOrReadOnly(permissions.BasePermission):\n\n def has_permission(self, request, view):\n is_safe = request.method in permissions.SAFE_METHODS\n is_auth = request.user.is_authenticated\n return is_safe or is_auth\n\n def has_object_permission(self, request, view, obj):\n is_safe = request.method in permissions.SAFE_METHODS\n is_author = obj.author == request.user\n is_privileged = None\n if request.user.is_authenticated:\n is_privileged = request.user.role in ('moderator', 'admin')\n return is_author or is_safe or is_privileged\n",
"step-4": "from rest_framework import permissions\n\n\nclass AdminUrlUserPermission(permissions.BasePermission):\n\n def has_permission(self, request, view):\n return request.user.is_authenticated and (request.user.role ==\n 'admin' or request.user.is_superuser)\n\n def has_object_permission(self, request, view, obj):\n return request.user.role == 'admin' or request.user.is_superuser\n\n\nclass ReadOnly(permissions.BasePermission):\n\n def has_permission(self, request, view):\n return request.method in permissions.SAFE_METHODS\n\n\nclass AuthorModeratorAdminOrReadOnly(permissions.BasePermission):\n\n def has_permission(self, request, view):\n is_safe = request.method in permissions.SAFE_METHODS\n is_auth = request.user.is_authenticated\n return is_safe or is_auth\n\n def has_object_permission(self, request, view, obj):\n is_safe = request.method in permissions.SAFE_METHODS\n is_author = obj.author == request.user\n is_privileged = None\n if request.user.is_authenticated:\n is_privileged = request.user.role in ('moderator', 'admin')\n return is_author or is_safe or is_privileged\n",
"step-5": "from rest_framework import permissions\n\n\nclass AdminUrlUserPermission(permissions.BasePermission):\n def has_permission(self, request, view):\n return (request.user.is_authenticated\n and (request.user.role == 'admin'\n or request.user.is_superuser))\n\n def has_object_permission(self, request, view, obj):\n return (request.user.role == 'admin'\n or request.user.is_superuser)\n\n\nclass ReadOnly(permissions.BasePermission):\n def has_permission(self, request, view):\n return request.method in permissions.SAFE_METHODS\n\n\nclass AuthorModeratorAdminOrReadOnly(permissions.BasePermission):\n def has_permission(self, request, view):\n is_safe = request.method in permissions.SAFE_METHODS\n is_auth = request.user.is_authenticated\n return is_safe or is_auth\n\n def has_object_permission(self, request, view, obj):\n is_safe = request.method in permissions.SAFE_METHODS\n is_author = obj.author == request.user\n is_privileged = None\n if request.user.is_authenticated:\n is_privileged = request.user.role in ('moderator', 'admin')\n return is_author or is_safe or is_privileged\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
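The `has_object_permission` in the record above seeds `is_privileged` with `None` and relies on its falsiness in the final `or` chain. A minimal sketch of a strictly boolean variant, assuming the same custom user model with a `role` field that the record implies (only the class name and the policy are taken from the record):

from rest_framework import permissions


class AuthorModeratorAdminOrReadOnly(permissions.BasePermission):
    # Same policy as the record: reads for anyone, writes only for the
    # object's author or a user whose role is moderator/admin.

    def has_permission(self, request, view):
        return (request.method in permissions.SAFE_METHODS
                or request.user.is_authenticated)

    def has_object_permission(self, request, view, obj):
        if request.method in permissions.SAFE_METHODS:
            return True
        is_privileged = (request.user.is_authenticated
                         and request.user.role in ('moderator', 'admin'))
        return obj.author == request.user or is_privileged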
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_mults():
assert task5.mults(3, 5, 10) == 23
assert task5.mults(5, 3, 10) == 23
assert task5.mults(3, 2, 10) == 32
assert task5.mults(7, 8, 50) == 364
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, '..')
<|reserved_special_token_0|>
def test_mults():
assert task5.mults(3, 5, 10) == 23
assert task5.mults(5, 3, 10) == 23
assert task5.mults(3, 2, 10) == 32
assert task5.mults(7, 8, 50) == 364
<|reserved_special_token_1|>
import pytest
import sys
sys.path.insert(0, '..')
from task_05 import task5
def test_mults():
assert task5.mults(3, 5, 10) == 23
assert task5.mults(5, 3, 10) == 23
assert task5.mults(3, 2, 10) == 32
assert task5.mults(7, 8, 50) == 364
|
flexible
|
{
"blob_id": "1c8622167240243da05a241e3630f79cdf36d7a8",
"index": 4776,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_mults():\n assert task5.mults(3, 5, 10) == 23\n assert task5.mults(5, 3, 10) == 23\n assert task5.mults(3, 2, 10) == 32\n assert task5.mults(7, 8, 50) == 364\n",
"step-3": "<mask token>\nsys.path.insert(0, '..')\n<mask token>\n\n\ndef test_mults():\n assert task5.mults(3, 5, 10) == 23\n assert task5.mults(5, 3, 10) == 23\n assert task5.mults(3, 2, 10) == 32\n assert task5.mults(7, 8, 50) == 364\n",
"step-4": "import pytest\nimport sys\nsys.path.insert(0, '..')\nfrom task_05 import task5\n\n\ndef test_mults():\n assert task5.mults(3, 5, 10) == 23\n assert task5.mults(5, 3, 10) == 23\n assert task5.mults(3, 2, 10) == 32\n assert task5.mults(7, 8, 50) == 364\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
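The record above ships only the tests; the implementation under test is elided. A minimal `mults` that satisfies all four recorded assertions (the sum of positive multiples of either factor strictly below the limit) could look like this; the signature is inferred from the calls, not confirmed by the source:

def mults(a, b, limit):
    # Sum every n below `limit` divisible by a or b; a common multiple
    # such as 6 for (3, 2) is naturally counted once.
    return sum(n for n in range(1, limit) if n % a == 0 or n % b == 0)


# Mirrors the recorded test cases:
assert mults(3, 5, 10) == 23
assert mults(3, 2, 10) == 32
assert mults(7, 8, 50) == 364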
import json
import sqlite3
import time
import shelve
import os
from constants import *
VEC_TYPES = [
'''
CREATE TABLE "{}"
(ID TEXT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
''',
'''
CREATE TABLE "{}"
(ID INT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
'''
]
class Vector():
def __init__(self, name, type, url_path):
self._name = name
self._conn = sqlite3.connect(url_path)
self._cur = self._conn.cursor()
# check if table exists, if not create TABLE
self._cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = self._cur.fetchall()
if name not in [val[0] for val in tables]:
self._conn.execute(VEC_TYPES[type].format(self._name.replace('"', '""')))
def __setitem__(self, index, edges):
try:
self._conn.execute(
"""
INSERT INTO "{}" (ID, num)
VALUES (?, ?);
""".format(self._name.replace('"', '""')), (index, edges)
)
except Exception as e:
print(e)
print("Update Failed")
def __getitem__(self, index):
self._cur.execute(
"""
SELECT * FROM "{}"
WHERE ID = ?;
""".format(self._name.replace('"', '""')), (index,)
)
try:
return self._cur.fetchall()[0][1]
except Exception as e:
print(e)
return None
def get_multiple(self, keys):
print(keys)
if len(keys) == 0:
return []
keys = [(key,) for key in keys]
print(keys)
self._cur.executemany(
"""
SELECT * FROM "{}"
WHERE ID = ?;
""".format(self._name.replace('"', '""')), keys
)
try:
a = [val[1] for val in self._cur.fetchall()]
print(a)
return a
except Exception as e:
print(e)
return []
def save(self):
self._conn.commit()
def close(self):
self._conn.close()
"""
vec = Vector("yoav_table", 0, EDGES_VECTOR_PATH)
print(vec[0])
vec[0] = "yo"
print(vec[0])
vec.save()
"""
|
normal
|
{
"blob_id": "0a6cb6d3fad09ab7f0e19b6c79965315c0e0d634",
"index": 4793,
"step-1": "<mask token>\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n <mask token>\n <mask token>\n <mask token>\n\n def save(self):\n self._conn.commit()\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n <mask token>\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\nVEC_TYPES = [\n \"\"\"\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ,\n \"\"\"\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ]\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index, edges))\n except Exception as e:\n print(e)\n print('Update Failed')\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-4": "import json\nimport sqlite3\nimport time\nimport shelve\nimport os\nfrom constants import *\nVEC_TYPES = [\n \"\"\"\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ,\n \"\"\"\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ]\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index, edges))\n except Exception as e:\n print(e)\n print('Update Failed')\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-5": "import json\nimport sqlite3\nimport time\nimport shelve\nimport os\n\nfrom constants import *\n\n\nVEC_TYPES = [\n '''\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n ''',\n '''\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n '''\n]\n\n\nclass Vector():\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n\n # check if table exists, if not create TABLE\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace('\"', '\"\"')))\n\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\".format(self._name.replace('\"', '\"\"')), (index, edges)\n )\n except Exception as e:\n print(e)\n print(\"Update Failed\")\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\".format(self._name.replace('\"', '\"\"')), (index,)\n )\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\".format(self._name.replace('\"', '\"\"')), keys\n )\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\"\"\"\nvec = Vector(\"yoav_table\", 0, EDGES_VECTOR_PATH)\nprint(vec[0])\nvec[0] = \"yo\"\nprint(vec[0])\nvec.save()\n\n\"\"\"",
"step-ids": [
3,
6,
8,
9,
10
]
}
|
[
3,
6,
8,
9,
10
] |
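Two caveats in the `Vector` class above are worth flagging: `__setitem__` issues a plain INSERT, so assigning to an existing key violates the primary key (the commented-out demo at the bottom would trip this; INSERT OR REPLACE avoids it), and `get_multiple` passes a SELECT to `executemany`, which Python's sqlite3 module rejects. A sketch of a working multi-get, keeping the record's identifier-quoting convention (`get_multiple_fixed` is an illustrative name, not from the source):

import sqlite3


def get_multiple_fixed(cur, table_name, keys):
    # sqlite3 refuses SELECT statements in executemany(), so build a single
    # query with an IN (...) placeholder list instead.
    if not keys:
        return []
    placeholders = ', '.join('?' for _ in keys)
    cur.execute(
        'SELECT ID, num FROM "{}" WHERE ID IN ({});'.format(
            table_name.replace('"', '""'), placeholders),
        list(keys))
    return [row[1] for row in cur.fetchall()]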
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('ml', '0003_auto_20191123_1835')]
operations = [migrations.AlterField(model_name='ml', name='file', field
=models.ImageField(upload_to='images'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('ml', '0003_auto_20191123_1835')]
operations = [migrations.AlterField(model_name='ml', name='file', field
=models.ImageField(upload_to='images'))]
<|reserved_special_token_1|>
# Generated by Django 2.2.7 on 2019-11-23 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ml', '0003_auto_20191123_1835'),
]
operations = [
migrations.AlterField(
model_name='ml',
name='file',
field=models.ImageField(upload_to='images'),
),
]
|
flexible
|
{
"blob_id": "2bf5ec4b4c0f0eed8364dcc9f1be599a804846f2",
"index": 4981,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('ml', '0003_auto_20191123_1835')]\n operations = [migrations.AlterField(model_name='ml', name='file', field\n =models.ImageField(upload_to='images'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('ml', '0003_auto_20191123_1835')]\n operations = [migrations.AlterField(model_name='ml', name='file', field\n =models.ImageField(upload_to='images'))]\n",
"step-5": "# Generated by Django 2.2.7 on 2019-11-23 18:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ml', '0003_auto_20191123_1835'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='ml',\n name='file',\n field=models.ImageField(upload_to='images'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
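The migration above is the auto-generated form of a one-line model change. A sketch of the `ml` app's model after 0004 is applied, with only the altered field known from the record (the model class name is assumed, and `ImageField` requires Pillow to be installed when migrations are generated):

from django.db import models


class Ml(models.Model):  # hypothetical name; only the field below is in the record
    # After 0004, uploads are validated as images and stored under MEDIA_ROOT/images/.
    file = models.ImageField(upload_to='images')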
import copy
from basics.binary_tree.binary_tree import TreeNode
from basics.binary_tree.traversals import level_order_traversal
def max_depth_bottom_up(root):
if not root:
return 0
max_so_far = 0
def max_depth(node, depth):
nonlocal max_so_far
if not node.left and not node.right:
max_so_far = max(max_so_far, depth)
else:
if node.left:
max_depth(node.left, 1 + depth)
if node.right:
max_depth(node.right, 1 + depth)
max_depth(root, 1)
return max_so_far
def max_depth_top_down(root):
if not root:
return 0
return 1 + max(max_depth_top_down(root.left),
max_depth_top_down(root.right))
def is_symmetric(root):
def is_mirror(left, right):
if left is None and right is None:
return True
elif left is None or right is None:
return False
else:
return (left.val == right.val and
is_mirror(left.right, right.left) and
is_mirror(left.left, right.right))
return is_mirror(root, root)
def has_path_sum(root, target_sum):
def path_sum(node, sum_left):
if not node:
return False
if not node.left and not node.right and node.val == sum_left:
return True
return (path_sum(node.left, sum_left-node.val) or
path_sum(node.right, sum_left - node.val))
return path_sum(root, target_sum)
def build_tree_from_inorder_preorder(inorder, preorder):
if not inorder or not preorder:
return None
inorder_map = {val: i for i, val in enumerate(inorder)}
def helper(lo, hi):
if lo > hi:
return None
node = TreeNode(preorder.pop(0))
mid = inorder_map[node.val]
node.left = helper(lo, mid - 1)
node.right = helper(mid + 1, hi)
return node
return helper(0, len(inorder) - 1)
def build_tree_from_inorder_postorder(inorder, postorder):
if not inorder or not postorder:
return None
inorder_map = {val: i for i, val in enumerate(inorder)}
def helper(lo, hi):
if lo > hi:
return None
node = TreeNode(postorder.pop())
mid = inorder_map[node.val]
node.right = helper(mid+1, hi)
node.left = helper(lo, mid-1)
return node
return helper(0, len(inorder)-1)
def next_right_pointer(root):
"""
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
self.next = None
"""
levels = []
to_do = [root]
while to_do:
cur_level = []
next_to_do = []
for n in to_do:
if n is not None:
cur_level.append(n)
next_to_do += [n.left, n.right]
if cur_level:
levels.append(cur_level)
to_do = next_to_do
for level in levels[1:]:
level.append(None)
for i in range(1, len(level)):
level[i-1].next = level[i]
return root
def lowest_common_ancestor(root, p, q):
answer = None
def recurse_tree(node):
nonlocal answer
if not node:
return False
left = recurse_tree(node.left)
right = recurse_tree(node.right)
mid = node == p or node == q
if mid + left + right >= 2:
answer = node
return mid or left or right
recurse_tree(root)
return answer
def lowest_common_ancestor_2(root, p, q):
if root == p or root == q:
return root
left = right = None
if root.left:
left = lowest_common_ancestor_2(root.left, p, q)
if root.right:
right = lowest_common_ancestor_2(root.right, p, q)
if left and right:
return root
else:
return left or right
def lowest_common_ancestor_3(root, p, q):
stack = [root]
parents = {root: None}
while p not in parents or q not in parents:
node = stack.pop()
if node.left:
parents[node.left] = node
stack.append(node.left)
if node.right:
parents[node.right] = node
stack.append(node.right)
ancestors = set()
while p:
ancestors.add(p)
p = parents[p]
while q not in ancestors:
q = parents[q]
return q
def serialize_tree(root):
levels = level_order_traversal(root)
return levels
def deserialize_tree(serialized):
if not serialized:
return None
levels = copy.deepcopy(serialized)
root = TreeNode(levels.pop(0)[0])
nodes = [root]
while levels:
level = levels.pop(0)
next_nodes = []
for i, node in enumerate(nodes):
if node:
node.left = TreeNode(level[2*i]) if level[2*i] else None
node.right = TreeNode(level[2*i+1]) if level[2*i+1] else None
next_nodes += [node.left, node.right]
else:
next_nodes += [None, None]
nodes = next_nodes
return root
def equal(root1, root2):
if not root1 and not root2:
return True
if not root1 or not root2:
return False
return (root1.val == root2.val and
equal(root1.left, root2.left) and
equal(root1.right, root2.right))
|
normal
|
{
"blob_id": "555646a5d57152034b467cbce16b6c183bcfbb37",
"index": 6658,
"step-1": "<mask token>\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n max_depth(root, 1)\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.\n right))\n\n\n<mask token>\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return path_sum(node.left, sum_left - node.val) or path_sum(node.\n right, sum_left - node.val)\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n return helper(0, len(inorder) - 1)\n\n\n<mask token>\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i - 1].next = level[i]\n return root\n\n\n<mask token>\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2 * i]) if level[2 * i] else None\n node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1\n ] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and equal(root1.left, root2.left) and equal(\n root1.right, root2.right)\n",
"step-2": "<mask token>\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n max_depth(root, 1)\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.\n right))\n\n\n<mask token>\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return path_sum(node.left, sum_left - node.val) or path_sum(node.\n right, sum_left - node.val)\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n return helper(0, len(inorder) - 1)\n\n\n<mask token>\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i - 1].next = level[i]\n return root\n\n\n<mask token>\n\n\ndef lowest_common_ancestor_2(root, p, q):\n if root == p or root == q:\n return root\n left = right = None\n if root.left:\n left = lowest_common_ancestor_2(root.left, p, q)\n if root.right:\n right = lowest_common_ancestor_2(root.right, p, q)\n if left and right:\n return root\n else:\n return left or right\n\n\n<mask token>\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2 * i]) if level[2 * i] else None\n node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1\n ] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and equal(root1.left, root2.left) and equal(\n root1.right, root2.right)\n",
"step-3": "<mask token>\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n max_depth(root, 1)\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.\n right))\n\n\n<mask token>\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return path_sum(node.left, sum_left - node.val) or path_sum(node.\n right, sum_left - node.val)\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n return helper(0, len(inorder) - 1)\n\n\ndef build_tree_from_inorder_postorder(inorder, postorder):\n if not inorder or not postorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(postorder.pop())\n mid = inorder_map[node.val]\n node.right = helper(mid + 1, hi)\n node.left = helper(lo, mid - 1)\n return node\n return helper(0, len(inorder) - 1)\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i - 1].next = level[i]\n return root\n\n\ndef lowest_common_ancestor(root, p, q):\n answer = None\n\n def recurse_tree(node):\n nonlocal answer\n if not node:\n return False\n left = recurse_tree(node.left)\n right = recurse_tree(node.right)\n mid = node == p or node == q\n if mid + left + right >= 2:\n answer = node\n return mid or left or right\n recurse_tree(root)\n return answer\n\n\ndef lowest_common_ancestor_2(root, p, q):\n if root == p or root == q:\n return root\n left = right = None\n if root.left:\n left = lowest_common_ancestor_2(root.left, p, q)\n if root.right:\n right = lowest_common_ancestor_2(root.right, p, q)\n if left and right:\n return root\n else:\n return left or right\n\n\ndef lowest_common_ancestor_3(root, p, q):\n stack = [root]\n parents = {root: None}\n while p not in parents or q not in parents:\n node = stack.pop()\n if node.left:\n parents[node.left] = node\n stack.append(node.left)\n if node.right:\n parents[node.right] = node\n stack.append(node.right)\n ancestors = set()\n while p:\n ancestors.add(p)\n p = parents[p]\n while q not in ancestors:\n q = parents[q]\n return q\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = 
TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2 * i]) if level[2 * i] else None\n node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1\n ] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and equal(root1.left, root2.left) and equal(\n root1.right, root2.right)\n",
"step-4": "import copy\nfrom basics.binary_tree.binary_tree import TreeNode\nfrom basics.binary_tree.traversals import level_order_traversal\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n max_depth(root, 1)\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left), max_depth_top_down(root.\n right))\n\n\ndef is_symmetric(root):\n\n def is_mirror(left, right):\n if left is None and right is None:\n return True\n elif left is None or right is None:\n return False\n else:\n return left.val == right.val and is_mirror(left.right, right.left\n ) and is_mirror(left.left, right.right)\n return is_mirror(root, root)\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return path_sum(node.left, sum_left - node.val) or path_sum(node.\n right, sum_left - node.val)\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n return helper(0, len(inorder) - 1)\n\n\ndef build_tree_from_inorder_postorder(inorder, postorder):\n if not inorder or not postorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(postorder.pop())\n mid = inorder_map[node.val]\n node.right = helper(mid + 1, hi)\n node.left = helper(lo, mid - 1)\n return node\n return helper(0, len(inorder) - 1)\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i - 1].next = level[i]\n return root\n\n\ndef lowest_common_ancestor(root, p, q):\n answer = None\n\n def recurse_tree(node):\n nonlocal answer\n if not node:\n return False\n left = recurse_tree(node.left)\n right = recurse_tree(node.right)\n mid = node == p or node == q\n if mid + left + right >= 2:\n answer = node\n return mid or left or right\n recurse_tree(root)\n return answer\n\n\ndef lowest_common_ancestor_2(root, p, q):\n if root == p or root == q:\n return root\n left = right = None\n if root.left:\n left = lowest_common_ancestor_2(root.left, p, q)\n if root.right:\n right = lowest_common_ancestor_2(root.right, p, q)\n if left and right:\n return root\n else:\n return left or right\n\n\ndef lowest_common_ancestor_3(root, p, q):\n stack = [root]\n parents = {root: None}\n while p not in parents or q not in parents:\n node = stack.pop()\n if node.left:\n parents[node.left] = node\n stack.append(node.left)\n if 
node.right:\n parents[node.right] = node\n stack.append(node.right)\n ancestors = set()\n while p:\n ancestors.add(p)\n p = parents[p]\n while q not in ancestors:\n q = parents[q]\n return q\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2 * i]) if level[2 * i] else None\n node.right = TreeNode(level[2 * i + 1]) if level[2 * i + 1\n ] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return root1.val == root2.val and equal(root1.left, root2.left) and equal(\n root1.right, root2.right)\n",
"step-5": "import copy\n\nfrom basics.binary_tree.binary_tree import TreeNode\nfrom basics.binary_tree.traversals import level_order_traversal\n\n\ndef max_depth_bottom_up(root):\n if not root:\n return 0\n\n max_so_far = 0\n\n def max_depth(node, depth):\n nonlocal max_so_far\n if not node.left and not node.right:\n max_so_far = max(max_so_far, depth)\n else:\n if node.left:\n max_depth(node.left, 1 + depth)\n if node.right:\n max_depth(node.right, 1 + depth)\n\n max_depth(root, 1)\n\n return max_so_far\n\n\ndef max_depth_top_down(root):\n if not root:\n return 0\n return 1 + max(max_depth_top_down(root.left),\n max_depth_top_down(root.right))\n\n\ndef is_symmetric(root):\n\n def is_mirror(left, right):\n if left is None and right is None:\n return True\n elif left is None or right is None:\n return False\n else:\n return (left.val == right.val and\n is_mirror(left.right, right.left) and\n is_mirror(left.left, right.right))\n\n return is_mirror(root, root)\n\n\ndef has_path_sum(root, target_sum):\n\n def path_sum(node, sum_left):\n if not node:\n return False\n if not node.left and not node.right and node.val == sum_left:\n return True\n return (path_sum(node.left, sum_left-node.val) or\n path_sum(node.right, sum_left - node.val))\n\n return path_sum(root, target_sum)\n\n\ndef build_tree_from_inorder_preorder(inorder, preorder):\n if not inorder or not preorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(preorder.pop(0))\n mid = inorder_map[node.val]\n node.left = helper(lo, mid - 1)\n node.right = helper(mid + 1, hi)\n return node\n\n return helper(0, len(inorder) - 1)\n\n\ndef build_tree_from_inorder_postorder(inorder, postorder):\n if not inorder or not postorder:\n return None\n inorder_map = {val: i for i, val in enumerate(inorder)}\n\n def helper(lo, hi):\n if lo > hi:\n return None\n node = TreeNode(postorder.pop())\n mid = inorder_map[node.val]\n node.right = helper(mid+1, hi)\n node.left = helper(lo, mid-1)\n return node\n\n return helper(0, len(inorder)-1)\n\n\ndef next_right_pointer(root):\n \"\"\"\n class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.next = None\n \"\"\"\n levels = []\n to_do = [root]\n\n while to_do:\n cur_level = []\n next_to_do = []\n for n in to_do:\n if n is not None:\n cur_level.append(n)\n next_to_do += [n.left, n.right]\n if cur_level:\n levels.append(cur_level)\n to_do = next_to_do\n\n for level in levels[1:]:\n level.append(None)\n for i in range(1, len(level)):\n level[i-1].next = level[i]\n\n return root\n\n\ndef lowest_common_ancestor(root, p, q):\n answer = None\n\n def recurse_tree(node):\n nonlocal answer\n if not node:\n return False\n\n left = recurse_tree(node.left)\n right = recurse_tree(node.right)\n\n mid = node == p or node == q\n if mid + left + right >= 2:\n answer = node\n\n return mid or left or right\n\n recurse_tree(root)\n\n return answer\n\n\ndef lowest_common_ancestor_2(root, p, q):\n if root == p or root == q:\n return root\n\n left = right = None\n\n if root.left:\n left = lowest_common_ancestor_2(root.left, p, q)\n if root.right:\n right = lowest_common_ancestor_2(root.right, p, q)\n\n if left and right:\n return root\n else:\n return left or right\n\n\ndef lowest_common_ancestor_3(root, p, q):\n stack = [root]\n parents = {root: None}\n\n while p not in parents or q not in parents:\n node = stack.pop()\n if node.left:\n parents[node.left] = node\n 
stack.append(node.left)\n if node.right:\n parents[node.right] = node\n stack.append(node.right)\n\n ancestors = set()\n while p:\n ancestors.add(p)\n p = parents[p]\n\n while q not in ancestors:\n q = parents[q]\n\n return q\n\n\ndef serialize_tree(root):\n levels = level_order_traversal(root)\n return levels\n\n\ndef deserialize_tree(serialized):\n if not serialized:\n return None\n levels = copy.deepcopy(serialized)\n root = TreeNode(levels.pop(0)[0])\n nodes = [root]\n while levels:\n level = levels.pop(0)\n next_nodes = []\n for i, node in enumerate(nodes):\n if node:\n node.left = TreeNode(level[2*i]) if level[2*i] else None\n node.right = TreeNode(level[2*i+1]) if level[2*i+1] else None\n next_nodes += [node.left, node.right]\n else:\n next_nodes += [None, None]\n nodes = next_nodes\n\n return root\n\n\ndef equal(root1, root2):\n if not root1 and not root2:\n return True\n if not root1 or not root2:\n return False\n return (root1.val == root2.val and\n equal(root1.left, root2.left) and\n equal(root1.right, root2.right))\n\n\n\n\n\n\n",
"step-ids": [
8,
9,
12,
14,
15
]
}
|
[
8,
9,
12,
14,
15
] |
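A quick exercise of the helpers above, fully determined by the code in the record (`TreeNode` comes from the same `basics.binary_tree.binary_tree` import; note that `build_tree_from_inorder_preorder` drains its `preorder` list via `pop(0)`):

inorder = [9, 3, 15, 20, 7]
preorder = [3, 9, 20, 15, 7]
root = build_tree_from_inorder_preorder(inorder, preorder)  # preorder is consumed here

assert root.val == 3 and root.left.val == 9 and root.right.val == 20
assert max_depth_top_down(root) == 3
assert has_path_sum(root, 3 + 20 + 15)  # root-to-leaf path 3 -> 20 -> 15
assert not is_symmetric(root)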
#! /usr/bin/env python
# -*- conding:utf-8 -*-
import MySQLdb
import os
import commands
from common import logger_init
from logging import getLogger
import re
from db import VlanInfo,Session,WafBridge
def getVlan(): # get vlan data from t_vlan
session=Session()
vlanport=[]
for info in session.query(VlanInfo):
a=[]
a.append(info.nets)
a.append(info.vlan_id)
vlanport.append(a)
interface=[]
for i in range(len(vlanport)):
nic=vlanport[i]
a=nic[0].split(',')
interface.append( a[0]+'.'+nic[1])
interface.append(a[1]+'.'+nic[1])
return interface
def getBridgeInfo(): #get data from t_bridge
session=Session()
brgport=[]
for info in session.query(WafBridge.nics):
info=list(tuple(info))
info=''.join(info)
brgport.append(info)
brgport=' '.join(brgport)
return brgport
def getSysInterface(): #Gets the configured interface
info=os.popen('ifconfig').read()
f=open('ifconfig_info.txt','w')
print >>f,info
f.close()
match=re.compile(r'(.+?)\s*?Link')
f=open('ifconfig_info.txt','r')
interface=[]
for line in f:
if 'Link encap' in line:
info=match.match(line).groups()
interface.append(info)
f.close()
b=[]
for i in range(len(interface)):
a=list(tuple(interface[i]))
a=''.join(a)
b.append(a)
strinfo=' '.join(b)
listinfo=strinfo.split()
port=[]
nic=[]
for i in range(len(listinfo)):
if '.'in listinfo[i]:
port.append(listinfo[i])
else:
nic.append(listinfo[i])
all_port=[]
all_port.append(port)
all_port.append(nic)
return all_port
def VlanConfig(): #config vlan(add and delete)
logger_init('main','log/vlanconfig.log','INFO')
config_interface=getVlan()
configured_port=getSysInterface()
vlan_port=' '.join(configured_port[0])
configured_nic=' '.join(configured_port[1])
for i in range(len(config_interface)):
if config_interface[i] in vlan_port:
continue
else:
a=config_interface[i].split('.')
if a[0] not in configured_nic:
(status,output)=commands.getstatusoutput('ifconfig %s up'%a[0])
if status!=0:
return
(status,output)=commands.getstatusoutput('vconfig add %s %s'%(a[0],a[1]))
getLogger('main').info(output)
(status,output)=commands.getstatusoutput('ifconfig %s up'%config_interface[i])
if status==0:
getLogger('main').info('ifconfig %s up OK'%config_interface[i])
config_interface=' '.join(config_interface)
vlan_port=configured_port[0]
brgport=getBridgeInfo()
for i in range(len(vlan_port)):
if vlan_port[i] not in config_interface:
if vlan_port[i] not in brgport:
(status,output)=commands.getstatusoutput('vconfig rem %s'%vlan_port[i])
if status==0:
getLogger('main').info('vconfig rem %s ok'%vlan_port[i])
if __name__=='__main__':
VlanConfig()
# getVlan()
# getSysInterface()
# getBridgeInfo()
|
normal
|
{
"blob_id": "cd564ebb51cf91993d2ed1810707aead44c19a6b",
"index": 6959,
"step-1": "<mask token>\n\n\ndef getVlan():\n session = Session()\n vlanport = []\n for info in session.query(VlanInfo):\n a = []\n a.append(info.nets)\n a.append(info.vlan_id)\n vlanport.append(a)\n interface = []\n for i in range(len(vlanport)):\n nic = vlanport[i]\n a = nic[0].split(',')\n interface.append(a[0] + '.' + nic[1])\n interface.append(a[1] + '.' + nic[1])\n return interface\n\n\ndef getBridgeInfo():\n session = Session()\n brgport = []\n for info in session.query(WafBridge.nics):\n info = list(tuple(info))\n info = ''.join(info)\n brgport.append(info)\n brgport = ' '.join(brgport)\n return brgport\n\n\n<mask token>\n\n\ndef VlanConfig():\n logger_init('main', 'log/vlanconfig.log', 'INFO')\n config_interface = getVlan()\n configured_port = getSysInterface()\n vlan_port = ' '.join(configured_port[0])\n configured_nic = ' '.join(configured_port[1])\n for i in range(len(config_interface)):\n if config_interface[i] in vlan_port:\n continue\n else:\n a = config_interface[i].split('.')\n if a[0] not in configured_nic:\n status, output = commands.getstatusoutput('ifconfig %s up' %\n a[0])\n if status != 0:\n return\n status, output = commands.getstatusoutput('vconfig add %s %s' %\n (a[0], a[1]))\n getLogger('main').info(output)\n status, output = commands.getstatusoutput('ifconfig %s up' %\n config_interface[i])\n if status == 0:\n getLogger('main').info('ifconfig %s up OK' %\n config_interface[i])\n config_interface = ' '.join(config_interface)\n vlan_port = configured_port[0]\n brgport = getBridgeInfo()\n for i in range(len(vlan_port)):\n if vlan_port[i] not in config_interface:\n if vlan_port[i] not in brgport:\n status, output = commands.getstatusoutput('vconfig rem %s' %\n vlan_port[i])\n if status == 0:\n getLogger('main').info('vconfig rem %s ok' % vlan_port[i])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getVlan():\n session = Session()\n vlanport = []\n for info in session.query(VlanInfo):\n a = []\n a.append(info.nets)\n a.append(info.vlan_id)\n vlanport.append(a)\n interface = []\n for i in range(len(vlanport)):\n nic = vlanport[i]\n a = nic[0].split(',')\n interface.append(a[0] + '.' + nic[1])\n interface.append(a[1] + '.' + nic[1])\n return interface\n\n\ndef getBridgeInfo():\n session = Session()\n brgport = []\n for info in session.query(WafBridge.nics):\n info = list(tuple(info))\n info = ''.join(info)\n brgport.append(info)\n brgport = ' '.join(brgport)\n return brgport\n\n\ndef getSysInterface():\n info = os.popen('ifconfig').read()\n f = open('ifconfig_info.txt', 'w')\n print >> f, info\n f.close()\n match = re.compile('(.+?)\\\\s*?Link')\n f = open('ifconfig_info.txt', 'r')\n interface = []\n for line in f:\n if 'Link encap' in line:\n info = match.match(line).groups()\n interface.append(info)\n f.close()\n b = []\n for i in range(len(interface)):\n a = list(tuple(interface[i]))\n a = ''.join(a)\n b.append(a)\n strinfo = ' '.join(b)\n listinfo = strinfo.split()\n port = []\n nic = []\n for i in range(len(listinfo)):\n if '.' in listinfo[i]:\n port.append(listinfo[i])\n else:\n nic.append(listinfo[i])\n all_port = []\n all_port.append(port)\n all_port.append(nic)\n return all_port\n\n\ndef VlanConfig():\n logger_init('main', 'log/vlanconfig.log', 'INFO')\n config_interface = getVlan()\n configured_port = getSysInterface()\n vlan_port = ' '.join(configured_port[0])\n configured_nic = ' '.join(configured_port[1])\n for i in range(len(config_interface)):\n if config_interface[i] in vlan_port:\n continue\n else:\n a = config_interface[i].split('.')\n if a[0] not in configured_nic:\n status, output = commands.getstatusoutput('ifconfig %s up' %\n a[0])\n if status != 0:\n return\n status, output = commands.getstatusoutput('vconfig add %s %s' %\n (a[0], a[1]))\n getLogger('main').info(output)\n status, output = commands.getstatusoutput('ifconfig %s up' %\n config_interface[i])\n if status == 0:\n getLogger('main').info('ifconfig %s up OK' %\n config_interface[i])\n config_interface = ' '.join(config_interface)\n vlan_port = configured_port[0]\n brgport = getBridgeInfo()\n for i in range(len(vlan_port)):\n if vlan_port[i] not in config_interface:\n if vlan_port[i] not in brgport:\n status, output = commands.getstatusoutput('vconfig rem %s' %\n vlan_port[i])\n if status == 0:\n getLogger('main').info('vconfig rem %s ok' % vlan_port[i])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getVlan():\n session = Session()\n vlanport = []\n for info in session.query(VlanInfo):\n a = []\n a.append(info.nets)\n a.append(info.vlan_id)\n vlanport.append(a)\n interface = []\n for i in range(len(vlanport)):\n nic = vlanport[i]\n a = nic[0].split(',')\n interface.append(a[0] + '.' + nic[1])\n interface.append(a[1] + '.' + nic[1])\n return interface\n\n\ndef getBridgeInfo():\n session = Session()\n brgport = []\n for info in session.query(WafBridge.nics):\n info = list(tuple(info))\n info = ''.join(info)\n brgport.append(info)\n brgport = ' '.join(brgport)\n return brgport\n\n\ndef getSysInterface():\n info = os.popen('ifconfig').read()\n f = open('ifconfig_info.txt', 'w')\n print >> f, info\n f.close()\n match = re.compile('(.+?)\\\\s*?Link')\n f = open('ifconfig_info.txt', 'r')\n interface = []\n for line in f:\n if 'Link encap' in line:\n info = match.match(line).groups()\n interface.append(info)\n f.close()\n b = []\n for i in range(len(interface)):\n a = list(tuple(interface[i]))\n a = ''.join(a)\n b.append(a)\n strinfo = ' '.join(b)\n listinfo = strinfo.split()\n port = []\n nic = []\n for i in range(len(listinfo)):\n if '.' in listinfo[i]:\n port.append(listinfo[i])\n else:\n nic.append(listinfo[i])\n all_port = []\n all_port.append(port)\n all_port.append(nic)\n return all_port\n\n\ndef VlanConfig():\n logger_init('main', 'log/vlanconfig.log', 'INFO')\n config_interface = getVlan()\n configured_port = getSysInterface()\n vlan_port = ' '.join(configured_port[0])\n configured_nic = ' '.join(configured_port[1])\n for i in range(len(config_interface)):\n if config_interface[i] in vlan_port:\n continue\n else:\n a = config_interface[i].split('.')\n if a[0] not in configured_nic:\n status, output = commands.getstatusoutput('ifconfig %s up' %\n a[0])\n if status != 0:\n return\n status, output = commands.getstatusoutput('vconfig add %s %s' %\n (a[0], a[1]))\n getLogger('main').info(output)\n status, output = commands.getstatusoutput('ifconfig %s up' %\n config_interface[i])\n if status == 0:\n getLogger('main').info('ifconfig %s up OK' %\n config_interface[i])\n config_interface = ' '.join(config_interface)\n vlan_port = configured_port[0]\n brgport = getBridgeInfo()\n for i in range(len(vlan_port)):\n if vlan_port[i] not in config_interface:\n if vlan_port[i] not in brgport:\n status, output = commands.getstatusoutput('vconfig rem %s' %\n vlan_port[i])\n if status == 0:\n getLogger('main').info('vconfig rem %s ok' % vlan_port[i])\n\n\nif __name__ == '__main__':\n VlanConfig()\n",
"step-4": "import MySQLdb\nimport os\nimport commands\nfrom common import logger_init\nfrom logging import getLogger\nimport re\nfrom db import VlanInfo, Session, WafBridge\n\n\ndef getVlan():\n session = Session()\n vlanport = []\n for info in session.query(VlanInfo):\n a = []\n a.append(info.nets)\n a.append(info.vlan_id)\n vlanport.append(a)\n interface = []\n for i in range(len(vlanport)):\n nic = vlanport[i]\n a = nic[0].split(',')\n interface.append(a[0] + '.' + nic[1])\n interface.append(a[1] + '.' + nic[1])\n return interface\n\n\ndef getBridgeInfo():\n session = Session()\n brgport = []\n for info in session.query(WafBridge.nics):\n info = list(tuple(info))\n info = ''.join(info)\n brgport.append(info)\n brgport = ' '.join(brgport)\n return brgport\n\n\ndef getSysInterface():\n info = os.popen('ifconfig').read()\n f = open('ifconfig_info.txt', 'w')\n print >> f, info\n f.close()\n match = re.compile('(.+?)\\\\s*?Link')\n f = open('ifconfig_info.txt', 'r')\n interface = []\n for line in f:\n if 'Link encap' in line:\n info = match.match(line).groups()\n interface.append(info)\n f.close()\n b = []\n for i in range(len(interface)):\n a = list(tuple(interface[i]))\n a = ''.join(a)\n b.append(a)\n strinfo = ' '.join(b)\n listinfo = strinfo.split()\n port = []\n nic = []\n for i in range(len(listinfo)):\n if '.' in listinfo[i]:\n port.append(listinfo[i])\n else:\n nic.append(listinfo[i])\n all_port = []\n all_port.append(port)\n all_port.append(nic)\n return all_port\n\n\ndef VlanConfig():\n logger_init('main', 'log/vlanconfig.log', 'INFO')\n config_interface = getVlan()\n configured_port = getSysInterface()\n vlan_port = ' '.join(configured_port[0])\n configured_nic = ' '.join(configured_port[1])\n for i in range(len(config_interface)):\n if config_interface[i] in vlan_port:\n continue\n else:\n a = config_interface[i].split('.')\n if a[0] not in configured_nic:\n status, output = commands.getstatusoutput('ifconfig %s up' %\n a[0])\n if status != 0:\n return\n status, output = commands.getstatusoutput('vconfig add %s %s' %\n (a[0], a[1]))\n getLogger('main').info(output)\n status, output = commands.getstatusoutput('ifconfig %s up' %\n config_interface[i])\n if status == 0:\n getLogger('main').info('ifconfig %s up OK' %\n config_interface[i])\n config_interface = ' '.join(config_interface)\n vlan_port = configured_port[0]\n brgport = getBridgeInfo()\n for i in range(len(vlan_port)):\n if vlan_port[i] not in config_interface:\n if vlan_port[i] not in brgport:\n status, output = commands.getstatusoutput('vconfig rem %s' %\n vlan_port[i])\n if status == 0:\n getLogger('main').info('vconfig rem %s ok' % vlan_port[i])\n\n\nif __name__ == '__main__':\n VlanConfig()\n",
"step-5": "#! /usr/bin/env python\n# -*- conding:utf-8 -*-\nimport MySQLdb\nimport os\nimport commands\nfrom common import logger_init\nfrom logging import getLogger\nimport re\nfrom db import VlanInfo,Session,WafBridge\n\n\ndef getVlan(): # get vlan data from t_vlan\n session=Session()\n vlanport=[]\n for info in session.query(VlanInfo):\n a=[]\n a.append(info.nets)\n a.append(info.vlan_id)\n vlanport.append(a)\n interface=[]\n for i in range(len(vlanport)):\n nic=vlanport[i]\n a=nic[0].split(',')\n interface.append( a[0]+'.'+nic[1])\n interface.append(a[1]+'.'+nic[1])\n return interface\n\ndef getBridgeInfo(): #get data from t_bridge\n session=Session()\n brgport=[]\n for info in session.query(WafBridge.nics):\n info=list(tuple(info))\n info=''.join(info)\n brgport.append(info)\n brgport=' '.join(brgport)\n return brgport\n\n\ndef getSysInterface(): #Gets the configured interface\n info=os.popen('ifconfig').read()\n f=open('ifconfig_info.txt','w')\n print >>f,info\n f.close()\n match=re.compile(r'(.+?)\\s*?Link')\n f=open('ifconfig_info.txt','r')\n interface=[]\n for line in f:\n if 'Link encap' in line:\n info=match.match(line).groups()\n interface.append(info)\n f.close()\n b=[]\n for i in range(len(interface)):\n a=list(tuple(interface[i]))\n a=''.join(a)\n b.append(a)\n strinfo=' '.join(b)\n listinfo=strinfo.split()\n port=[]\n nic=[]\n for i in range(len(listinfo)):\n if '.'in listinfo[i]:\n port.append(listinfo[i])\n else:\n nic.append(listinfo[i])\n all_port=[]\n all_port.append(port)\n all_port.append(nic)\n return all_port\n\n\n\ndef VlanConfig(): #config vlan(add and delete)\n logger_init('main','log/vlanconfig.log','INFO')\n config_interface=getVlan()\n configured_port=getSysInterface()\n vlan_port=' '.join(configured_port[0])\n configured_nic=' '.join(configured_port[1])\n for i in range(len(config_interface)):\n if config_interface[i] in vlan_port:\n continue\n else:\n a=config_interface[i].split('.')\n if a[0] not in configured_nic:\n (status,output)=commands.getstatusoutput('ifconfig %s up'%a[0])\n if status!=0:\n return\n (status,output)=commands.getstatusoutput('vconfig add %s %s'%(a[0],a[1]))\n getLogger('main').info(output)\n (status,output)=commands.getstatusoutput('ifconfig %s up'%config_interface[i])\n if status==0:\n getLogger('main').info('ifconfig %s up OK'%config_interface[i])\n config_interface=' '.join(config_interface)\n vlan_port=configured_port[0]\n brgport=getBridgeInfo()\n for i in range(len(vlan_port)):\n if vlan_port[i] not in config_interface:\n if vlan_port[i] not in brgport:\n (status,output)=commands.getstatusoutput('vconfig rem %s'%vlan_port[i])\n if status==0:\n getLogger('main').info('vconfig rem %s ok'%vlan_port[i])\n\n\nif __name__=='__main__':\n VlanConfig()\n# getVlan()\n# getSysInterface()\n# getBridgeInfo()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
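The script above is Python 2 (`print >>f`, the long-removed `commands` module) and round-trips `ifconfig` output through a temporary file just to parse it. A Python 3 sketch of the same interface scan, parsing in memory and assuming the legacy net-tools `Link encap` output format the original matches on:

import re
import subprocess


def get_sys_interfaces():
    # Equivalent of getSysInterface(): ([vlan sub-interfaces], [plain nics]).
    out = subprocess.run(['ifconfig'], capture_output=True, text=True).stdout
    names = re.findall(r'^(\S+?)\s+Link encap', out, flags=re.MULTILINE)
    return ([n for n in names if '.' in n],
            [n for n in names if '.' not in n])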
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def numSmallerByFrequency(self, queries: List[str], words: List[str]
) ->List[int]:
words_freq = {word: word.count(min(word)) for word in words}
queries_freq = {}
ans = []
for query in queries:
if query in queries_freq:
ans.append(queries_freq[query])
continue
query_freq = query.count(min(query))
num = sum([(1 if query_freq < words_freq[word] else 0) for word in
words])
ans.append(num)
queries_freq[query] = num
return ans
<|reserved_special_token_1|>
class Solution:
# complexity: 2*n^2 + 4*n^2 -> 8*n^2
def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:
# complexity: n*2*l where l is the length of the word -> 2*n^2
words_freq = {
word: word.count(min(word)) for word in words
}
queries_freq = {}
ans = []
# complexity: q*4*n where q is the length of queries -> 4n^2
for query in queries:
if query in queries_freq:
ans.append(queries_freq[query])
continue
# complexity: 2*l where l is the length of the word -> 2*n
query_freq = query.count(min(query))
# complexity: n*n due the iteration and the sum -> 2*n
num = sum([1 if query_freq < words_freq[word]
else 0 for word in words])
ans.append(num)
queries_freq[query] = num
return ans
|
flexible
|
{
"blob_id": "e9918f4fac2e13b36d9b20ffc28dc6508aad6f9b",
"index": 2159,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def numSmallerByFrequency(self, queries: List[str], words: List[str]\n ) ->List[int]:\n words_freq = {word: word.count(min(word)) for word in words}\n queries_freq = {}\n ans = []\n for query in queries:\n if query in queries_freq:\n ans.append(queries_freq[query])\n continue\n query_freq = query.count(min(query))\n num = sum([(1 if query_freq < words_freq[word] else 0) for word in\n words])\n ans.append(num)\n queries_freq[query] = num\n return ans\n",
"step-4": "class Solution:\n # complexity: 2*n^2 + 4*n^2 -> 8*n^2\n def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:\n # complexity: n*2*l where l is the length of the word -> 2*n^2\n words_freq = {\n word: word.count(min(word)) for word in words\n }\n\n queries_freq = {}\n\n ans = []\n\n # complexity: q*4*n where q is the length of queries -> 4n^2\n for query in queries:\n if query in queries_freq:\n ans.append(queries_freq[query])\n continue\n\n # complexity: 2*l where l is the length of the word -> 2*n\n query_freq = query.count(min(query))\n # complexity: n*n due the iteration and the sum -> 2*n\n num = sum([1 if query_freq < words_freq[word]\n else 0 for word in words])\n ans.append(num)\n queries_freq[query] = num\n\n return ans\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
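The complexity comments in the record tally to quadratic (2*n^2 + 4*n^2 is 6*n^2 rather than the stated 8*n^2, but quadratic either way), because every uncached query walks all of `words`. Sorting the word frequencies once lets each query be answered by binary search, roughly O((n + q) log n); a sketch outside the LeetCode class wrapper:

from bisect import bisect_right
from typing import List


def num_smaller_by_frequency(queries: List[str], words: List[str]) -> List[int]:
    # f(s) = count of the lexicographically smallest character, as in the record.
    word_freqs = sorted(w.count(min(w)) for w in words)
    return [len(word_freqs) - bisect_right(word_freqs, q.count(min(q)))
            for q in queries]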
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', views.home, name='home'), path('ppt1', views.ppt1,
name='ppt1'), path('ppt2', views.ppt2, name='ppt2')]
<|reserved_special_token_1|>
from django.urls import path
from . import views
urlpatterns = [path('', views.home, name='home'), path('ppt1', views.ppt1,
name='ppt1'), path('ppt2', views.ppt2, name='ppt2')]
<|reserved_special_token_1|>
from django.urls import path
from . import views # 현재 패키지에서 views 모듈을 가져옴
urlpatterns = [
path('', views.home, name='home'),
path('ppt1',views.ppt1,name='ppt1'),
path('ppt2',views.ppt2,name='ppt2'),
]
|
flexible
|
{
"blob_id": "9db1887c5379623687d1dea343d72122bab66303",
"index": 2143,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.home, name='home'), path('ppt1', views.ppt1,\n name='ppt1'), path('ppt2', views.ppt2, name='ppt2')]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.home, name='home'), path('ppt1', views.ppt1,\n name='ppt1'), path('ppt2', views.ppt2, name='ppt2')]\n",
"step-4": "from django.urls import path\n\nfrom . import views # 현재 패키지에서 views 모듈을 가져옴\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('ppt1',views.ppt1,name='ppt1'),\n path('ppt2',views.ppt2,name='ppt2'),\n\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .facebook import *
|
normal
|
{
"blob_id": "7901a2bd4ae1070c8263d3cd97351b01ffbf7bb1",
"index": 7246,
"step-1": "<mask token>\n",
"step-2": "from .facebook import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import contextlib
import logging
import os
import pwd
import sys
from typing import Iterable
from sqlalchemy import Table, exists, null, select
from sqlalchemy.engine import Engine
from sqlalchemy.exc import DBAPIError
from sqlalchemy.pool import NullPool
from hades import constants
from hades.common import db
from hades.common.cli import (
ArgumentParser, parser as common_parser, setup_cli_logging,
)
from hades.common.privileges import dropped_privileges
from hades.config.loader import load_config
logger = logging.getLogger(__package__)
def check_database(engine: Engine, user_name: pwd.struct_passwd,
tables: Iterable[Table]):
logger.info("Checking database access as user %s", user_name)
try:
conn = engine.connect()
except DBAPIError as e:
logger.critical("Could not connect to database as %s: %s",
user_name, e)
raise
with contextlib.closing(conn):
for table in tables:
try:
check_table(conn, table)
except DBAPIError as e:
logger.critical("Query check for table %s as user %s failed: "
"%s", table.name, user_name, e)
raise
def check_table(conn, table):
conn.execute(select([exists(select([null()]).select_from(table))])).scalar()
def main():
parser = ArgumentParser(parents=[common_parser])
args = parser.parse_args()
setup_cli_logging(parser.prog, args)
config = load_config(args.config, runtime_checks=True)
try:
engine = db.create_engine(config, poolclass=NullPool)
agent_pwd = pwd.getpwnam(constants.AGENT_USER)
with dropped_privileges(agent_pwd):
check_database(engine, agent_pwd.pw_name,
(db.radacct, db.radpostauth))
portal_pwd = pwd.getpwnam(constants.PORTAL_USER)
with dropped_privileges(portal_pwd):
check_database(engine, portal_pwd.pw_name,
(db.radacct, db.radpostauth, db.radusergroup))
radius_pwd = pwd.getpwnam(constants.RADIUS_USER)
with dropped_privileges(radius_pwd):
check_database(engine, radius_pwd.pw_name,
(db.radacct, db.radgroupcheck, db.radgroupreply,
db.radpostauth, db.radreply, db.radusergroup))
except DBAPIError:
return os.EX_TEMPFAIL
return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
|
normal
|
{
"blob_id": "c9df53ac06b8bb106d73825d60fa885c06385e95",
"index": 8557,
"step-1": "<mask token>\n\n\ndef check_database(engine: Engine, user_name: pwd.struct_passwd, tables:\n Iterable[Table]):\n logger.info('Checking database access as user %s', user_name)\n try:\n conn = engine.connect()\n except DBAPIError as e:\n logger.critical('Could not connect to database as %s: %s', user_name, e\n )\n raise\n with contextlib.closing(conn):\n for table in tables:\n try:\n check_table(conn, table)\n except DBAPIError as e:\n logger.critical(\n 'Query check for table %s as user %s failed: %s', table\n .name, user_name, e)\n raise\n\n\ndef check_table(conn, table):\n conn.execute(select([exists(select([null()]).select_from(table))])).scalar(\n )\n\n\ndef main():\n parser = ArgumentParser(parents=[common_parser])\n args = parser.parse_args()\n setup_cli_logging(parser.prog, args)\n config = load_config(args.config, runtime_checks=True)\n try:\n engine = db.create_engine(config, poolclass=NullPool)\n agent_pwd = pwd.getpwnam(constants.AGENT_USER)\n with dropped_privileges(agent_pwd):\n check_database(engine, agent_pwd.pw_name, (db.radacct, db.\n radpostauth))\n portal_pwd = pwd.getpwnam(constants.PORTAL_USER)\n with dropped_privileges(portal_pwd):\n check_database(engine, portal_pwd.pw_name, (db.radacct, db.\n radpostauth, db.radusergroup))\n radius_pwd = pwd.getpwnam(constants.RADIUS_USER)\n with dropped_privileges(radius_pwd):\n check_database(engine, radius_pwd.pw_name, (db.radacct, db.\n radgroupcheck, db.radgroupreply, db.radpostauth, db.\n radreply, db.radusergroup))\n except DBAPIError:\n return os.EX_TEMPFAIL\n return os.EX_OK\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_database(engine: Engine, user_name: pwd.struct_passwd, tables:\n Iterable[Table]):\n logger.info('Checking database access as user %s', user_name)\n try:\n conn = engine.connect()\n except DBAPIError as e:\n logger.critical('Could not connect to database as %s: %s', user_name, e\n )\n raise\n with contextlib.closing(conn):\n for table in tables:\n try:\n check_table(conn, table)\n except DBAPIError as e:\n logger.critical(\n 'Query check for table %s as user %s failed: %s', table\n .name, user_name, e)\n raise\n\n\ndef check_table(conn, table):\n conn.execute(select([exists(select([null()]).select_from(table))])).scalar(\n )\n\n\ndef main():\n parser = ArgumentParser(parents=[common_parser])\n args = parser.parse_args()\n setup_cli_logging(parser.prog, args)\n config = load_config(args.config, runtime_checks=True)\n try:\n engine = db.create_engine(config, poolclass=NullPool)\n agent_pwd = pwd.getpwnam(constants.AGENT_USER)\n with dropped_privileges(agent_pwd):\n check_database(engine, agent_pwd.pw_name, (db.radacct, db.\n radpostauth))\n portal_pwd = pwd.getpwnam(constants.PORTAL_USER)\n with dropped_privileges(portal_pwd):\n check_database(engine, portal_pwd.pw_name, (db.radacct, db.\n radpostauth, db.radusergroup))\n radius_pwd = pwd.getpwnam(constants.RADIUS_USER)\n with dropped_privileges(radius_pwd):\n check_database(engine, radius_pwd.pw_name, (db.radacct, db.\n radgroupcheck, db.radgroupreply, db.radpostauth, db.\n radreply, db.radusergroup))\n except DBAPIError:\n return os.EX_TEMPFAIL\n return os.EX_OK\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__package__)\n\n\ndef check_database(engine: Engine, user_name: pwd.struct_passwd, tables:\n Iterable[Table]):\n logger.info('Checking database access as user %s', user_name)\n try:\n conn = engine.connect()\n except DBAPIError as e:\n logger.critical('Could not connect to database as %s: %s', user_name, e\n )\n raise\n with contextlib.closing(conn):\n for table in tables:\n try:\n check_table(conn, table)\n except DBAPIError as e:\n logger.critical(\n 'Query check for table %s as user %s failed: %s', table\n .name, user_name, e)\n raise\n\n\ndef check_table(conn, table):\n conn.execute(select([exists(select([null()]).select_from(table))])).scalar(\n )\n\n\ndef main():\n parser = ArgumentParser(parents=[common_parser])\n args = parser.parse_args()\n setup_cli_logging(parser.prog, args)\n config = load_config(args.config, runtime_checks=True)\n try:\n engine = db.create_engine(config, poolclass=NullPool)\n agent_pwd = pwd.getpwnam(constants.AGENT_USER)\n with dropped_privileges(agent_pwd):\n check_database(engine, agent_pwd.pw_name, (db.radacct, db.\n radpostauth))\n portal_pwd = pwd.getpwnam(constants.PORTAL_USER)\n with dropped_privileges(portal_pwd):\n check_database(engine, portal_pwd.pw_name, (db.radacct, db.\n radpostauth, db.radusergroup))\n radius_pwd = pwd.getpwnam(constants.RADIUS_USER)\n with dropped_privileges(radius_pwd):\n check_database(engine, radius_pwd.pw_name, (db.radacct, db.\n radgroupcheck, db.radgroupreply, db.radpostauth, db.\n radreply, db.radusergroup))\n except DBAPIError:\n return os.EX_TEMPFAIL\n return os.EX_OK\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "import contextlib\nimport logging\nimport os\nimport pwd\nimport sys\nfrom typing import Iterable\nfrom sqlalchemy import Table, exists, null, select\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.exc import DBAPIError\nfrom sqlalchemy.pool import NullPool\nfrom hades import constants\nfrom hades.common import db\nfrom hades.common.cli import ArgumentParser, parser as common_parser, setup_cli_logging\nfrom hades.common.privileges import dropped_privileges\nfrom hades.config.loader import load_config\nlogger = logging.getLogger(__package__)\n\n\ndef check_database(engine: Engine, user_name: pwd.struct_passwd, tables:\n Iterable[Table]):\n logger.info('Checking database access as user %s', user_name)\n try:\n conn = engine.connect()\n except DBAPIError as e:\n logger.critical('Could not connect to database as %s: %s', user_name, e\n )\n raise\n with contextlib.closing(conn):\n for table in tables:\n try:\n check_table(conn, table)\n except DBAPIError as e:\n logger.critical(\n 'Query check for table %s as user %s failed: %s', table\n .name, user_name, e)\n raise\n\n\ndef check_table(conn, table):\n conn.execute(select([exists(select([null()]).select_from(table))])).scalar(\n )\n\n\ndef main():\n parser = ArgumentParser(parents=[common_parser])\n args = parser.parse_args()\n setup_cli_logging(parser.prog, args)\n config = load_config(args.config, runtime_checks=True)\n try:\n engine = db.create_engine(config, poolclass=NullPool)\n agent_pwd = pwd.getpwnam(constants.AGENT_USER)\n with dropped_privileges(agent_pwd):\n check_database(engine, agent_pwd.pw_name, (db.radacct, db.\n radpostauth))\n portal_pwd = pwd.getpwnam(constants.PORTAL_USER)\n with dropped_privileges(portal_pwd):\n check_database(engine, portal_pwd.pw_name, (db.radacct, db.\n radpostauth, db.radusergroup))\n radius_pwd = pwd.getpwnam(constants.RADIUS_USER)\n with dropped_privileges(radius_pwd):\n check_database(engine, radius_pwd.pw_name, (db.radacct, db.\n radgroupcheck, db.radgroupreply, db.radpostauth, db.\n radreply, db.radusergroup))\n except DBAPIError:\n return os.EX_TEMPFAIL\n return os.EX_OK\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "import contextlib\nimport logging\nimport os\nimport pwd\nimport sys\nfrom typing import Iterable\n\nfrom sqlalchemy import Table, exists, null, select\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.exc import DBAPIError\nfrom sqlalchemy.pool import NullPool\n\nfrom hades import constants\nfrom hades.common import db\nfrom hades.common.cli import (\n ArgumentParser, parser as common_parser, setup_cli_logging,\n)\nfrom hades.common.privileges import dropped_privileges\nfrom hades.config.loader import load_config\n\nlogger = logging.getLogger(__package__)\n\n\ndef check_database(engine: Engine, user_name: pwd.struct_passwd,\n tables: Iterable[Table]):\n logger.info(\"Checking database access as user %s\", user_name)\n try:\n conn = engine.connect()\n except DBAPIError as e:\n logger.critical(\"Could not connect to database as %s: %s\",\n user_name, e)\n raise\n with contextlib.closing(conn):\n for table in tables:\n try:\n check_table(conn, table)\n except DBAPIError as e:\n logger.critical(\"Query check for table %s as user %s failed: \"\n \"%s\", table.name, user_name, e)\n raise\n\n\ndef check_table(conn, table):\n conn.execute(select([exists(select([null()]).select_from(table))])).scalar()\n\n\ndef main():\n parser = ArgumentParser(parents=[common_parser])\n args = parser.parse_args()\n setup_cli_logging(parser.prog, args)\n config = load_config(args.config, runtime_checks=True)\n try:\n engine = db.create_engine(config, poolclass=NullPool)\n agent_pwd = pwd.getpwnam(constants.AGENT_USER)\n with dropped_privileges(agent_pwd):\n check_database(engine, agent_pwd.pw_name,\n (db.radacct, db.radpostauth))\n portal_pwd = pwd.getpwnam(constants.PORTAL_USER)\n with dropped_privileges(portal_pwd):\n check_database(engine, portal_pwd.pw_name,\n (db.radacct, db.radpostauth, db.radusergroup))\n radius_pwd = pwd.getpwnam(constants.RADIUS_USER)\n with dropped_privileges(radius_pwd):\n check_database(engine, radius_pwd.pw_name,\n (db.radacct, db.radgroupcheck, db.radgroupreply,\n db.radpostauth, db.radreply, db.radusergroup))\n except DBAPIError:\n return os.EX_TEMPFAIL\n return os.EX_OK\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def report_mismatch_for_module(modules_1, modules_2, index):
module_1 = modules_1[index]
module_2 = modules_2[index]
if len(module_1) != 2 or len(module_2) != 2:
raise AssertionError('Module {}, value {}, has length {}'.format(
index, module_1, len(module_1)))
print('Module {}\n'.format(index))
print(report_mismatch(modules_1[index][1]))
print(report_mismatch(modules_2[index][1]))
print()
<|reserved_special_token_0|>
def report_honor(save_game_1, save_game_2):
print('Honor is 1 is {}'.format(save_game_1['scripts']['modules'][1][1]
[3076]))
print('Honor is 2 is {}'.format(save_game_2['scripts']['modules'][1][1]
[3076]))
def main():
parser = argparse.ArgumentParser(description='Parse Save File')
parser.add_argument('--file1', dest='file1', default=
'/home/krieghan/hq_saves/agssave.000.hqthor')
parser.add_argument('--file2', dest='file2', default=
'/home/krieghan/hq_saves/agssave.001.hqthor')
parser.add_argument('--full', dest='full', action='store_true', default
=False)
parser.add_argument('--catch-transition', default=None)
parser.add_argument('--honor', action='store_true', default=False)
args = parser.parse_args()
save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)
save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)
from kobold import compare
result = compare.compare(save_game_1, save_game_2, type_compare='full')
modules_1 = result[0]['scripts']['modules']
modules_2 = result[1]['scripts']['modules']
stat_module_1 = modules_1[1][1]
stat_module_2 = modules_2[1][1]
if args.honor:
report_honor(save_game_1, save_game_2)
elif args.catch_transition is not None:
pass
elif args.full:
report_mismatch_all_modules(result)
else:
report_mismatch_for_module(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def report_mismatch(compare_result_list):
report = []
for i in range(len(compare_result_list)):
value = compare_result_list[i]
if value != '_':
report.append((i, value))
return report
def report_mismatch_for_module(modules_1, modules_2, index):
module_1 = modules_1[index]
module_2 = modules_2[index]
if len(module_1) != 2 or len(module_2) != 2:
raise AssertionError('Module {}, value {}, has length {}'.format(
index, module_1, len(module_1)))
print('Module {}\n'.format(index))
print(report_mismatch(modules_1[index][1]))
print(report_mismatch(modules_2[index][1]))
print()
def report_mismatch_all_modules(result):
scripts_1 = result[0]['scripts']
scripts_2 = result[1]['scripts']
modules_1 = scripts_1['modules']
modules_2 = scripts_2['modules']
print('Global Data:\n')
print(report_mismatch(scripts_1['global_data']))
print(report_mismatch(scripts_2['global_data']))
print()
for i in range(len(scripts_1['modules'])):
try:
report_mismatch_for_module(modules_1, modules_2, i)
except AssertionError:
print('Module {} was a match'.format(i))
def report_honor(save_game_1, save_game_2):
print('Honor is 1 is {}'.format(save_game_1['scripts']['modules'][1][1]
[3076]))
print('Honor is 2 is {}'.format(save_game_2['scripts']['modules'][1][1]
[3076]))
def main():
parser = argparse.ArgumentParser(description='Parse Save File')
parser.add_argument('--file1', dest='file1', default=
'/home/krieghan/hq_saves/agssave.000.hqthor')
parser.add_argument('--file2', dest='file2', default=
'/home/krieghan/hq_saves/agssave.001.hqthor')
parser.add_argument('--full', dest='full', action='store_true', default
=False)
parser.add_argument('--catch-transition', default=None)
parser.add_argument('--honor', action='store_true', default=False)
args = parser.parse_args()
save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)
save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)
from kobold import compare
result = compare.compare(save_game_1, save_game_2, type_compare='full')
modules_1 = result[0]['scripts']['modules']
modules_2 = result[1]['scripts']['modules']
stat_module_1 = modules_1[1][1]
stat_module_2 = modules_2[1][1]
if args.honor:
report_honor(save_game_1, save_game_2)
elif args.catch_transition is not None:
pass
elif args.full:
report_mismatch_all_modules(result)
else:
report_mismatch_for_module(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def report_mismatch(compare_result_list):
report = []
for i in range(len(compare_result_list)):
value = compare_result_list[i]
if value != '_':
report.append((i, value))
return report
def report_mismatch_for_module(modules_1, modules_2, index):
module_1 = modules_1[index]
module_2 = modules_2[index]
if len(module_1) != 2 or len(module_2) != 2:
raise AssertionError('Module {}, value {}, has length {}'.format(
index, module_1, len(module_1)))
print('Module {}\n'.format(index))
print(report_mismatch(modules_1[index][1]))
print(report_mismatch(modules_2[index][1]))
print()
def report_mismatch_all_modules(result):
scripts_1 = result[0]['scripts']
scripts_2 = result[1]['scripts']
modules_1 = scripts_1['modules']
modules_2 = scripts_2['modules']
print('Global Data:\n')
print(report_mismatch(scripts_1['global_data']))
print(report_mismatch(scripts_2['global_data']))
print()
for i in range(len(scripts_1['modules'])):
try:
report_mismatch_for_module(modules_1, modules_2, i)
except AssertionError:
print('Module {} was a match'.format(i))
def report_honor(save_game_1, save_game_2):
print('Honor is 1 is {}'.format(save_game_1['scripts']['modules'][1][1]
[3076]))
print('Honor is 2 is {}'.format(save_game_2['scripts']['modules'][1][1]
[3076]))
def main():
parser = argparse.ArgumentParser(description='Parse Save File')
parser.add_argument('--file1', dest='file1', default=
'/home/krieghan/hq_saves/agssave.000.hqthor')
parser.add_argument('--file2', dest='file2', default=
'/home/krieghan/hq_saves/agssave.001.hqthor')
parser.add_argument('--full', dest='full', action='store_true', default
=False)
parser.add_argument('--catch-transition', default=None)
parser.add_argument('--honor', action='store_true', default=False)
args = parser.parse_args()
save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)
save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)
from kobold import compare
result = compare.compare(save_game_1, save_game_2, type_compare='full')
modules_1 = result[0]['scripts']['modules']
modules_2 = result[1]['scripts']['modules']
stat_module_1 = modules_1[1][1]
stat_module_2 = modules_2[1][1]
if args.honor:
report_honor(save_game_1, save_game_2)
elif args.catch_transition is not None:
pass
elif args.full:
report_mismatch_all_modules(result)
else:
report_mismatch_for_module(1)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import argparse
from ags_save_parser import saved_game
def report_mismatch(compare_result_list):
report = []
for i in range(len(compare_result_list)):
value = compare_result_list[i]
if value != '_':
report.append((i, value))
return report
def report_mismatch_for_module(modules_1, modules_2, index):
module_1 = modules_1[index]
module_2 = modules_2[index]
if len(module_1) != 2 or len(module_2) != 2:
raise AssertionError('Module {}, value {}, has length {}'.format(
index, module_1, len(module_1)))
print('Module {}\n'.format(index))
print(report_mismatch(modules_1[index][1]))
print(report_mismatch(modules_2[index][1]))
print()
def report_mismatch_all_modules(result):
scripts_1 = result[0]['scripts']
scripts_2 = result[1]['scripts']
modules_1 = scripts_1['modules']
modules_2 = scripts_2['modules']
print('Global Data:\n')
print(report_mismatch(scripts_1['global_data']))
print(report_mismatch(scripts_2['global_data']))
print()
for i in range(len(scripts_1['modules'])):
try:
report_mismatch_for_module(modules_1, modules_2, i)
except AssertionError:
print('Module {} was a match'.format(i))
def report_honor(save_game_1, save_game_2):
print('Honor is 1 is {}'.format(save_game_1['scripts']['modules'][1][1]
[3076]))
print('Honor is 2 is {}'.format(save_game_2['scripts']['modules'][1][1]
[3076]))
def main():
parser = argparse.ArgumentParser(description='Parse Save File')
parser.add_argument('--file1', dest='file1', default=
'/home/krieghan/hq_saves/agssave.000.hqthor')
parser.add_argument('--file2', dest='file2', default=
'/home/krieghan/hq_saves/agssave.001.hqthor')
parser.add_argument('--full', dest='full', action='store_true', default
=False)
parser.add_argument('--catch-transition', default=None)
parser.add_argument('--honor', action='store_true', default=False)
args = parser.parse_args()
save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)
save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)
from kobold import compare
result = compare.compare(save_game_1, save_game_2, type_compare='full')
modules_1 = result[0]['scripts']['modules']
modules_2 = result[1]['scripts']['modules']
stat_module_1 = modules_1[1][1]
stat_module_2 = modules_2[1][1]
if args.honor:
report_honor(save_game_1, save_game_2)
elif args.catch_transition is not None:
pass
elif args.full:
report_mismatch_all_modules(result)
else:
report_mismatch_for_module(1)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import argparse
from ags_save_parser import saved_game
def report_mismatch(compare_result_list):
report = []
for i in range(len(compare_result_list)):
value = compare_result_list[i]
if value != '_':
report.append((i, value))
return report
def report_mismatch_for_module(
modules_1,
modules_2,
index):
module_1 = modules_1[index]
module_2 = modules_2[index]
if len(module_1) != 2 or len(module_2) != 2:
raise AssertionError(
"Module {}, value {}, has length {}".format(
index,
module_1,
len(module_1)))
print('Module {}\n'.format(index))
print(report_mismatch(modules_1[index][1]))
print(report_mismatch(modules_2[index][1]))
print()
def report_mismatch_all_modules(result):
scripts_1 = result[0]['scripts']
scripts_2 = result[1]['scripts']
modules_1 = scripts_1['modules']
modules_2 = scripts_2['modules']
print('Global Data:\n')
print(report_mismatch(scripts_1['global_data']))
print(report_mismatch(scripts_2['global_data']))
print()
for i in range(len(scripts_1['modules'])):
try:
report_mismatch_for_module(
modules_1,
modules_2,
i)
except AssertionError:
print('Module {} was a match'.format(i))
def report_honor(save_game_1, save_game_2):
print("Honor is 1 is {}".format(
save_game_1['scripts']['modules'][1][1][3076]))
print("Honor is 2 is {}".format(
save_game_2['scripts']['modules'][1][1][3076]))
def main():
parser = argparse.ArgumentParser(description='Parse Save File')
parser.add_argument(
'--file1',
dest='file1',
default='/home/krieghan/hq_saves/agssave.000.hqthor')
parser.add_argument(
'--file2',
dest='file2',
default='/home/krieghan/hq_saves/agssave.001.hqthor')
parser.add_argument(
'--full',
dest='full',
action='store_true',
default=False)
parser.add_argument(
'--catch-transition',
default=None)
parser.add_argument(
'--honor',
action='store_true',
default=False)
args = parser.parse_args()
save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)
save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)
from kobold import compare
result = compare.compare(save_game_1, save_game_2, type_compare='full')
modules_1 = result[0]['scripts']['modules']
modules_2 = result[1]['scripts']['modules']
stat_module_1 = modules_1[1][1]
stat_module_2 = modules_2[1][1]
if args.honor:
report_honor(save_game_1, save_game_2)
elif args.catch_transition is not None:
pass
elif args.full:
report_mismatch_all_modules(result)
else:
report_mismatch_for_module(1)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "329451a3d3fa95f5572dc1701d1adbf4aaa72628",
"index": 8521,
"step-1": "<mask token>\n\n\ndef report_mismatch_for_module(modules_1, modules_2, index):\n module_1 = modules_1[index]\n module_2 = modules_2[index]\n if len(module_1) != 2 or len(module_2) != 2:\n raise AssertionError('Module {}, value {}, has length {}'.format(\n index, module_1, len(module_1)))\n print('Module {}\\n'.format(index))\n print(report_mismatch(modules_1[index][1]))\n print(report_mismatch(modules_2[index][1]))\n print()\n\n\n<mask token>\n\n\ndef report_honor(save_game_1, save_game_2):\n print('Honor is 1 is {}'.format(save_game_1['scripts']['modules'][1][1]\n [3076]))\n print('Honor is 2 is {}'.format(save_game_2['scripts']['modules'][1][1]\n [3076]))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Parse Save File')\n parser.add_argument('--file1', dest='file1', default=\n '/home/krieghan/hq_saves/agssave.000.hqthor')\n parser.add_argument('--file2', dest='file2', default=\n '/home/krieghan/hq_saves/agssave.001.hqthor')\n parser.add_argument('--full', dest='full', action='store_true', default\n =False)\n parser.add_argument('--catch-transition', default=None)\n parser.add_argument('--honor', action='store_true', default=False)\n args = parser.parse_args()\n save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)\n save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)\n from kobold import compare\n result = compare.compare(save_game_1, save_game_2, type_compare='full')\n modules_1 = result[0]['scripts']['modules']\n modules_2 = result[1]['scripts']['modules']\n stat_module_1 = modules_1[1][1]\n stat_module_2 = modules_2[1][1]\n if args.honor:\n report_honor(save_game_1, save_game_2)\n elif args.catch_transition is not None:\n pass\n elif args.full:\n report_mismatch_all_modules(result)\n else:\n report_mismatch_for_module(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef report_mismatch(compare_result_list):\n report = []\n for i in range(len(compare_result_list)):\n value = compare_result_list[i]\n if value != '_':\n report.append((i, value))\n return report\n\n\ndef report_mismatch_for_module(modules_1, modules_2, index):\n module_1 = modules_1[index]\n module_2 = modules_2[index]\n if len(module_1) != 2 or len(module_2) != 2:\n raise AssertionError('Module {}, value {}, has length {}'.format(\n index, module_1, len(module_1)))\n print('Module {}\\n'.format(index))\n print(report_mismatch(modules_1[index][1]))\n print(report_mismatch(modules_2[index][1]))\n print()\n\n\ndef report_mismatch_all_modules(result):\n scripts_1 = result[0]['scripts']\n scripts_2 = result[1]['scripts']\n modules_1 = scripts_1['modules']\n modules_2 = scripts_2['modules']\n print('Global Data:\\n')\n print(report_mismatch(scripts_1['global_data']))\n print(report_mismatch(scripts_2['global_data']))\n print()\n for i in range(len(scripts_1['modules'])):\n try:\n report_mismatch_for_module(modules_1, modules_2, i)\n except AssertionError:\n print('Module {} was a match'.format(i))\n\n\ndef report_honor(save_game_1, save_game_2):\n print('Honor is 1 is {}'.format(save_game_1['scripts']['modules'][1][1]\n [3076]))\n print('Honor is 2 is {}'.format(save_game_2['scripts']['modules'][1][1]\n [3076]))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Parse Save File')\n parser.add_argument('--file1', dest='file1', default=\n '/home/krieghan/hq_saves/agssave.000.hqthor')\n parser.add_argument('--file2', dest='file2', default=\n '/home/krieghan/hq_saves/agssave.001.hqthor')\n parser.add_argument('--full', dest='full', action='store_true', default\n =False)\n parser.add_argument('--catch-transition', default=None)\n parser.add_argument('--honor', action='store_true', default=False)\n args = parser.parse_args()\n save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)\n save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)\n from kobold import compare\n result = compare.compare(save_game_1, save_game_2, type_compare='full')\n modules_1 = result[0]['scripts']['modules']\n modules_2 = result[1]['scripts']['modules']\n stat_module_1 = modules_1[1][1]\n stat_module_2 = modules_2[1][1]\n if args.honor:\n report_honor(save_game_1, save_game_2)\n elif args.catch_transition is not None:\n pass\n elif args.full:\n report_mismatch_all_modules(result)\n else:\n report_mismatch_for_module(1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef report_mismatch(compare_result_list):\n report = []\n for i in range(len(compare_result_list)):\n value = compare_result_list[i]\n if value != '_':\n report.append((i, value))\n return report\n\n\ndef report_mismatch_for_module(modules_1, modules_2, index):\n module_1 = modules_1[index]\n module_2 = modules_2[index]\n if len(module_1) != 2 or len(module_2) != 2:\n raise AssertionError('Module {}, value {}, has length {}'.format(\n index, module_1, len(module_1)))\n print('Module {}\\n'.format(index))\n print(report_mismatch(modules_1[index][1]))\n print(report_mismatch(modules_2[index][1]))\n print()\n\n\ndef report_mismatch_all_modules(result):\n scripts_1 = result[0]['scripts']\n scripts_2 = result[1]['scripts']\n modules_1 = scripts_1['modules']\n modules_2 = scripts_2['modules']\n print('Global Data:\\n')\n print(report_mismatch(scripts_1['global_data']))\n print(report_mismatch(scripts_2['global_data']))\n print()\n for i in range(len(scripts_1['modules'])):\n try:\n report_mismatch_for_module(modules_1, modules_2, i)\n except AssertionError:\n print('Module {} was a match'.format(i))\n\n\ndef report_honor(save_game_1, save_game_2):\n print('Honor is 1 is {}'.format(save_game_1['scripts']['modules'][1][1]\n [3076]))\n print('Honor is 2 is {}'.format(save_game_2['scripts']['modules'][1][1]\n [3076]))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Parse Save File')\n parser.add_argument('--file1', dest='file1', default=\n '/home/krieghan/hq_saves/agssave.000.hqthor')\n parser.add_argument('--file2', dest='file2', default=\n '/home/krieghan/hq_saves/agssave.001.hqthor')\n parser.add_argument('--full', dest='full', action='store_true', default\n =False)\n parser.add_argument('--catch-transition', default=None)\n parser.add_argument('--honor', action='store_true', default=False)\n args = parser.parse_args()\n save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)\n save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)\n from kobold import compare\n result = compare.compare(save_game_1, save_game_2, type_compare='full')\n modules_1 = result[0]['scripts']['modules']\n modules_2 = result[1]['scripts']['modules']\n stat_module_1 = modules_1[1][1]\n stat_module_2 = modules_2[1][1]\n if args.honor:\n report_honor(save_game_1, save_game_2)\n elif args.catch_transition is not None:\n pass\n elif args.full:\n report_mismatch_all_modules(result)\n else:\n report_mismatch_for_module(1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nfrom ags_save_parser import saved_game\n\n\ndef report_mismatch(compare_result_list):\n report = []\n for i in range(len(compare_result_list)):\n value = compare_result_list[i]\n if value != '_':\n report.append((i, value))\n return report\n\n\ndef report_mismatch_for_module(modules_1, modules_2, index):\n module_1 = modules_1[index]\n module_2 = modules_2[index]\n if len(module_1) != 2 or len(module_2) != 2:\n raise AssertionError('Module {}, value {}, has length {}'.format(\n index, module_1, len(module_1)))\n print('Module {}\\n'.format(index))\n print(report_mismatch(modules_1[index][1]))\n print(report_mismatch(modules_2[index][1]))\n print()\n\n\ndef report_mismatch_all_modules(result):\n scripts_1 = result[0]['scripts']\n scripts_2 = result[1]['scripts']\n modules_1 = scripts_1['modules']\n modules_2 = scripts_2['modules']\n print('Global Data:\\n')\n print(report_mismatch(scripts_1['global_data']))\n print(report_mismatch(scripts_2['global_data']))\n print()\n for i in range(len(scripts_1['modules'])):\n try:\n report_mismatch_for_module(modules_1, modules_2, i)\n except AssertionError:\n print('Module {} was a match'.format(i))\n\n\ndef report_honor(save_game_1, save_game_2):\n print('Honor is 1 is {}'.format(save_game_1['scripts']['modules'][1][1]\n [3076]))\n print('Honor is 2 is {}'.format(save_game_2['scripts']['modules'][1][1]\n [3076]))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Parse Save File')\n parser.add_argument('--file1', dest='file1', default=\n '/home/krieghan/hq_saves/agssave.000.hqthor')\n parser.add_argument('--file2', dest='file2', default=\n '/home/krieghan/hq_saves/agssave.001.hqthor')\n parser.add_argument('--full', dest='full', action='store_true', default\n =False)\n parser.add_argument('--catch-transition', default=None)\n parser.add_argument('--honor', action='store_true', default=False)\n args = parser.parse_args()\n save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)\n save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)\n from kobold import compare\n result = compare.compare(save_game_1, save_game_2, type_compare='full')\n modules_1 = result[0]['scripts']['modules']\n modules_2 = result[1]['scripts']['modules']\n stat_module_1 = modules_1[1][1]\n stat_module_2 = modules_2[1][1]\n if args.honor:\n report_honor(save_game_1, save_game_2)\n elif args.catch_transition is not None:\n pass\n elif args.full:\n report_mismatch_all_modules(result)\n else:\n report_mismatch_for_module(1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import argparse\n\nfrom ags_save_parser import saved_game\n\ndef report_mismatch(compare_result_list):\n report = []\n for i in range(len(compare_result_list)):\n value = compare_result_list[i]\n if value != '_':\n report.append((i, value))\n return report\n\ndef report_mismatch_for_module(\n modules_1,\n modules_2,\n index):\n module_1 = modules_1[index]\n module_2 = modules_2[index]\n if len(module_1) != 2 or len(module_2) != 2:\n raise AssertionError(\n \"Module {}, value {}, has length {}\".format(\n index,\n module_1,\n len(module_1)))\n print('Module {}\\n'.format(index))\n print(report_mismatch(modules_1[index][1]))\n print(report_mismatch(modules_2[index][1]))\n print()\n\ndef report_mismatch_all_modules(result):\n scripts_1 = result[0]['scripts']\n scripts_2 = result[1]['scripts']\n modules_1 = scripts_1['modules']\n modules_2 = scripts_2['modules']\n \n print('Global Data:\\n')\n print(report_mismatch(scripts_1['global_data']))\n print(report_mismatch(scripts_2['global_data']))\n print()\n\n for i in range(len(scripts_1['modules'])):\n try:\n report_mismatch_for_module(\n modules_1,\n modules_2,\n i)\n except AssertionError:\n print('Module {} was a match'.format(i))\n\ndef report_honor(save_game_1, save_game_2):\n print(\"Honor is 1 is {}\".format(\n save_game_1['scripts']['modules'][1][1][3076]))\n print(\"Honor is 2 is {}\".format(\n save_game_2['scripts']['modules'][1][1][3076]))\n \n\n\ndef main():\n parser = argparse.ArgumentParser(description='Parse Save File')\n parser.add_argument(\n '--file1',\n dest='file1',\n default='/home/krieghan/hq_saves/agssave.000.hqthor')\n parser.add_argument(\n '--file2',\n dest='file2',\n default='/home/krieghan/hq_saves/agssave.001.hqthor')\n parser.add_argument(\n '--full',\n dest='full',\n action='store_true',\n default=False)\n parser.add_argument(\n '--catch-transition',\n default=None)\n parser.add_argument(\n '--honor',\n action='store_true',\n default=False)\n\n args = parser.parse_args()\n save_game_1 = saved_game.get_save_game(args.file1, num_characters=69)\n save_game_2 = saved_game.get_save_game(args.file2, num_characters=69)\n \n from kobold import compare\n\n result = compare.compare(save_game_1, save_game_2, type_compare='full')\n modules_1 = result[0]['scripts']['modules']\n modules_2 = result[1]['scripts']['modules']\n stat_module_1 = modules_1[1][1]\n stat_module_2 = modules_2[1][1]\n\n if args.honor:\n report_honor(save_game_1, save_game_2)\n elif args.catch_transition is not None:\n pass\n elif args.full:\n report_mismatch_all_modules(result)\n else:\n report_mismatch_for_module(1)\n\n\nif __name__ == '__main__':\n main()\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='BillDetail', fields=[('id',
models.BigAutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('Name', models.CharField(default=None,
max_length=150)), ('CID', models.IntegerField(unique=True)), (
'Units', models.PositiveIntegerField(default=None, validators=[
django.core.validators.MaxValueValidator(100)])), ('Amount', models
.PositiveIntegerField(default=None, validators=[django.core.
validators.MaxValueValidator(100)])), ('BillGenerated', models.
DateField(auto_now_add=True))])]
<|reserved_special_token_1|>
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='BillDetail', fields=[('id',
models.BigAutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('Name', models.CharField(default=None,
max_length=150)), ('CID', models.IntegerField(unique=True)), (
'Units', models.PositiveIntegerField(default=None, validators=[
django.core.validators.MaxValueValidator(100)])), ('Amount', models
.PositiveIntegerField(default=None, validators=[django.core.
validators.MaxValueValidator(100)])), ('BillGenerated', models.
DateField(auto_now_add=True))])]
<|reserved_special_token_1|>
# Generated by Django 3.2.3 on 2021-06-19 11:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BillDetail',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(default=None, max_length=150)),
('CID', models.IntegerField(unique=True)),
('Units', models.PositiveIntegerField(default=None, validators=[django.core.validators.MaxValueValidator(100)])),
('Amount', models.PositiveIntegerField(default=None, validators=[django.core.validators.MaxValueValidator(100)])),
('BillGenerated', models.DateField(auto_now_add=True)),
],
),
]
|
flexible
|
{
"blob_id": "b7a8e4105f1c1c532eaae27afae14e9a4f2ddfba",
"index": 2915,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='BillDetail', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('Name', models.CharField(default=None,\n max_length=150)), ('CID', models.IntegerField(unique=True)), (\n 'Units', models.PositiveIntegerField(default=None, validators=[\n django.core.validators.MaxValueValidator(100)])), ('Amount', models\n .PositiveIntegerField(default=None, validators=[django.core.\n validators.MaxValueValidator(100)])), ('BillGenerated', models.\n DateField(auto_now_add=True))])]\n",
"step-4": "import django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='BillDetail', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('Name', models.CharField(default=None,\n max_length=150)), ('CID', models.IntegerField(unique=True)), (\n 'Units', models.PositiveIntegerField(default=None, validators=[\n django.core.validators.MaxValueValidator(100)])), ('Amount', models\n .PositiveIntegerField(default=None, validators=[django.core.\n validators.MaxValueValidator(100)])), ('BillGenerated', models.\n DateField(auto_now_add=True))])]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-06-19 11:27\r\n\r\nimport django.core.validators\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='BillDetail',\r\n fields=[\r\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('Name', models.CharField(default=None, max_length=150)),\r\n ('CID', models.IntegerField(unique=True)),\r\n ('Units', models.PositiveIntegerField(default=None, validators=[django.core.validators.MaxValueValidator(100)])),\r\n ('Amount', models.PositiveIntegerField(default=None, validators=[django.core.validators.MaxValueValidator(100)])),\r\n ('BillGenerated', models.DateField(auto_now_add=True)),\r\n ],\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# coding: utf-8
# # Header
# In[1]:
# -*- coding: utf-8 -*-
# ------------- Number of seconds you have lived -------------
# # Variable definitions
# In[2]:
# Variable definitions
anios = 30
dias_por_anio = 365
horas_por_dia = 24
segundos_por_hora = 60
# # Operation
# In[3]:
# Operation
print (anios * dias_por_anio * horas_por_dia * segundos_por_hora)
# In[ ]:
|
normal
|
{
"blob_id": "f153da7e4537f807f6c9d9d268a00443933d8315",
"index": 4167,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(anios * dias_por_anio * horas_por_dia * segundos_por_hora)\n",
"step-3": "anios = 30\ndias_por_anio = 365\nhoras_por_dia = 24\nsegundos_por_hora = 60\nprint(anios * dias_por_anio * horas_por_dia * segundos_por_hora)\n",
"step-4": "#!/usr/bin/env python\n# coding: utf-8\n\n# # Cabecera\n\n# In[1]:\n\n\n# -*- coding: utf-8 -*-\n\n# ------------- Cantidad de segundos que has vivido -------------\n\n\n# # Definición de variables\n\n# In[2]:\n\n\n# Definición de variables\nanios = 30\ndias_por_anio = 365\nhoras_por_dia = 24\nsegundos_por_hora = 60\n\n\n# # Operación\n\n# In[3]:\n\n\n# Operación\nprint (anios * dias_por_anio * horas_por_dia * segundos_por_hora)\n\n\n# In[ ]:\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |