code
stringlengths 13
1.2M
| order_type
stringclasses 1
value | original_example
dict | step_ids
listlengths 1
5
|
---|---|---|---|
class Component:
    """Base class for components; defines no behavior of its own.

    Components are stored on an Entity keyed by their concrete type
    (see Entity.add_component), so each entity holds at most one of each.
    """
    pass
class Entity:
    """A game object identified by an integer id, holding at most one component per type."""

    def __init__(self, id):
        """Create an entity with the given id and no components."""
        self.id = id
        # Since there is only one of each type of component, they are stored by type
        self.components = {}

    def add_component(self, component):
        """Attach *component* to this entity.

        Raises ValueError if a component of the same type is already attached.
        (ValueError is a subclass of Exception, so existing handlers that
        caught the previous bare Exception still work.)
        """
        if type(component) in self.components:
            raise ValueError("This entity already has a component of that type")
        self.components[type(component)] = component

    def has_component(self, component_type):
        """Return True if a component of *component_type* is attached."""
        return component_type in self.components

    def get_component(self, component_type):
        """Return the attached component of *component_type*; KeyError if absent."""
        return self.components[component_type]
class System:
    """Base class for systems that process entities having a required set of component types."""

    def __init__(self, *required):
        """Record the required component types; start tracking no entities."""
        self.required = required
        self.entity_ids = set()

    def bind_manager(self, manager):
        """Remember the owning manager so entities can be looked up during update()."""
        self.manager = manager

    def update(self, deltaTime):
        """Run one frame: begin(), process every registered entity, then end()."""
        self.begin()
        for eid in self.entity_ids:
            self.process(self.manager.get_entity_by_id(eid), deltaTime)
        self.end()

    def process(self, entity, deltaTime):
        """Hook: per-entity work; overridden in derived systems."""
        pass

    def begin(self):
        """Hook: runs once before the first entity is processed each update."""
        pass

    def end(self):
        """Hook: runs once after the last entity is processed each update."""
        pass

    def update_entity_registration(self, entity):
        """Register or unregister *entity* according to whether it currently matches."""
        registered = entity.id in self.entity_ids
        eligible = self.matches(entity)
        if eligible and not registered:
            # Newly eligible: start tracking it.
            self.entity_ids.add(entity.id)
        elif registered and not eligible:
            # Lost a required component: stop tracking it.
            self.entity_ids.remove(entity.id)

    def matches(self, entity):
        """Return True when the entity has every required component type."""
        return all(entity.has_component(req) for req in self.required)
class Manager:
    """Owns all entities and systems and routes updates/registrations between them."""

    def __init__(self):
        self.entities = {}
        # Next id to hand out; incremented on every create_entity call.
        self.current_id = 0
        self.systems = []

    def create_entity(self):
        """Create, store, and return a new Entity with a fresh id."""
        new_entity = Entity(self.current_id)
        self.current_id += 1
        self.entities[new_entity.id] = new_entity
        return new_entity

    def get_entity_by_id(self, id):
        """Look up an entity by its id; KeyError if unknown."""
        return self.entities[id]

    def add_component_to_entity(self, entity, component):
        """Attach a component and refresh system registrations.

        Use this instead of Entity.add_component directly so that every
        system's entity set stays in sync with the entity's components.
        """
        entity.add_component(component)
        self.update_entity_registration(entity)

    def add_system(self, system):
        """Bind *system* to this manager and include it in future updates."""
        system.bind_manager(self)
        self.systems.append(system)

    def update(self, deltaTime):
        """Advance every registered system by deltaTime."""
        for registered_system in self.systems:
            registered_system.update(deltaTime)

    def update_entity_registration(self, entity):
        """Ask every system to re-evaluate whether it should track *entity*."""
        for registered_system in self.systems:
            registered_system.update_entity_registration(entity)
|
normal
|
{
"blob_id": "14f7f31fa64799cdc08b1363b945da50841d16b5",
"index": 3020,
"step-1": "<mask token>\n\n\nclass System:\n <mask token>\n\n def bind_manager(self, manager):\n self.manager = manager\n <mask token>\n\n def process(self, entity, deltaTime):\n pass\n <mask token>\n <mask token>\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n\n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n return True\n\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n self.systems = []\n\n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n\n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-2": "<mask token>\n\n\nclass System:\n <mask token>\n\n def bind_manager(self, manager):\n self.manager = manager\n\n def update(self, deltaTime):\n self.begin()\n for entity_id in self.entity_ids:\n entity = self.manager.get_entity_by_id(entity_id)\n self.process(entity, deltaTime)\n self.end()\n\n def process(self, entity, deltaTime):\n pass\n <mask token>\n <mask token>\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n\n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n return True\n\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n self.systems = []\n\n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n\n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-3": "<mask token>\n\n\nclass System:\n\n def __init__(self, *required):\n self.required = required\n self.entity_ids = set()\n\n def bind_manager(self, manager):\n self.manager = manager\n\n def update(self, deltaTime):\n self.begin()\n for entity_id in self.entity_ids:\n entity = self.manager.get_entity_by_id(entity_id)\n self.process(entity, deltaTime)\n self.end()\n\n def process(self, entity, deltaTime):\n pass\n\n def begin(self):\n pass\n\n def end(self):\n pass\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n\n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n return True\n\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n self.systems = []\n\n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n\n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-4": "<mask token>\n\n\nclass Entity:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass System:\n\n def __init__(self, *required):\n self.required = required\n self.entity_ids = set()\n\n def bind_manager(self, manager):\n self.manager = manager\n\n def update(self, deltaTime):\n self.begin()\n for entity_id in self.entity_ids:\n entity = self.manager.get_entity_by_id(entity_id)\n self.process(entity, deltaTime)\n self.end()\n\n def process(self, entity, deltaTime):\n pass\n\n def begin(self):\n pass\n\n def end(self):\n pass\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n\n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n return True\n\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n self.systems = []\n\n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n self.entities[entity.id] = entity\n return entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n\n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-5": "\nclass Component:\n pass\n\nclass Entity:\n\n def __init__(self, id):\n self.id = id\n self.components = {}\n\n def add_component(self, component):\n if type(component) in self.components:\n raise Exception(\"This entity already has a component of that type\")\n\n # Since there is only one of each type of component, they are stored by type\n self.components[type(component)] = component\n\n def has_component(self, component_type):\n return component_type in self.components\n\n def get_component(self, component_type):\n return self.components[component_type]\n\nclass System:\n\n def __init__(self, *required):\n self.required = required\n self.entity_ids = set()\n\n def bind_manager(self, manager):\n self.manager = manager\n \n def update(self, deltaTime):\n self.begin()\n\n for entity_id in self.entity_ids:\n entity = self.manager.get_entity_by_id(entity_id)\n self.process(entity, deltaTime)\n \n self.end()\n\n # Overridden in the derived class to specify functionality of system\n def process(self, entity, deltaTime):\n pass\n\n # Can be overridden if you want to do something before the first entity is processed\n def begin(self):\n pass\n\n # Can be overridden if you want to do something after the last entity is processed\n def end(self):\n pass\n\n def update_entity_registration(self, entity):\n contains = entity.id in self.entity_ids\n matches = self.matches(entity)\n\n # Already exists, but no longer matches\n if contains and not matches:\n self.entity_ids.remove(entity.id)\n # Doesn't exist, but does match\n elif not contains and matches:\n self.entity_ids.add(entity.id)\n \n def matches(self, entity):\n for required in self.required:\n if not entity.has_component(required):\n return False\n\n return True\n\nclass Manager:\n\n def __init__(self):\n self.entities = {}\n self.current_id = 0\n\n self.systems = []\n \n def create_entity(self):\n entity = Entity(self.current_id)\n self.current_id += 1\n\n self.entities[entity.id] = entity\n return 
entity\n\n def get_entity_by_id(self, id):\n return self.entities[id]\n\n # Use this to add components, not the entity method!! Wish there was a way to enforce that in python\n def add_component_to_entity(self, entity, component):\n entity.add_component(component)\n self.update_entity_registration(entity)\n \n def add_system(self, system):\n system.bind_manager(self)\n self.systems.append(system)\n\n def update(self, deltaTime):\n for system in self.systems:\n system.update(deltaTime)\n\n def update_entity_registration(self, entity):\n for system in self.systems:\n system.update_entity_registration(entity)\n",
"step-ids": [
13,
14,
17,
18,
24
]
}
|
[
13,
14,
17,
18,
24
] |
from flask import Flask, flash, abort, redirect, url_for, request, render_template, make_response, json, Response
import os, sys
import config
import boto.ec2.elb
import boto
from boto.ec2 import *
# Flask application object; the route handlers below register against it.
app = Flask(__name__)
@app.route('/')
def index():
    """Dashboard: per-region counts of instances, EBS volumes, elastic IPs,
    load balancers, and instances with pending scheduled events."""
    # Renamed from `list` — the original shadowed the builtin.
    region_summaries = []
    creds = config.get_ec2_conf()
    for region in config.region_list():
        conn = connect_to_region(
            region,
            aws_access_key_id=creds['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
        zones = conn.get_all_zones()
        instances = conn.get_all_instance_status()
        instance_count = len(instances)
        ebs = conn.get_all_volumes()
        ebscount = len(ebs)
        # Instances with at least one scheduled maintenance event.
        event_count = sum(1 for instance in instances if instance.events)
        # Volumes not attached to any instance (attachment_state() is None).
        unattached_ebs = sum(1 for vol in ebs
                             if vol.attachment_state() is None)
        elis = conn.get_all_addresses()
        eli_count = len(elis)
        # Elastic IPs not associated with an instance.
        unattached_eli = sum(1 for eli in elis if not eli.instance_id)
        connelb = boto.ec2.elb.connect_to_region(
            region,
            aws_access_key_id=creds['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
        elb_count = len(connelb.get_all_load_balancers())
        region_summaries.append({
            'region': region,
            'zones': zones,
            'instance_count': instance_count,
            'ebscount': ebscount,
            'unattached_ebs': unattached_ebs,
            'eli_count': eli_count,
            'unattached_eli': unattached_eli,
            'elb_count': elb_count,
            'event_count': event_count,
        })
    # The template expects the variable name `list`; keep the keyword as-is.
    return render_template('index.html', list=region_summaries)
@app.route('/ebs_volumes/<region>/')
def ebs_volumes(region=None):
    """List unattached EBS volumes in *region*."""
    creds = config.get_ec2_conf()
    conn = connect_to_region(
        region,
        aws_access_key_id=creds['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    ebs_vol = []
    for vol in conn.get_all_volumes():
        # attachment_state() is None when the volume is not attached anywhere;
        # use `is None`, not `== None`.
        if vol.attachment_state() is None:
            ebs_vol.append({'id': vol.id, 'size': vol.size,
                            'iops': vol.iops, 'status': vol.status})
    return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)
@app.route('/ebs_volumes/<region>/delete/<vol_id>')
def delete_ebs_vol(region=None, vol_id=None):
    """Delete the EBS volume *vol_id* in *region*, then redirect back to the list."""
    creds = config.get_ec2_conf()
    conn = connect_to_region(
        region,
        aws_access_key_id=creds['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    # NOTE(review): the old `vol_id.encode('ascii')` was a Python 2
    # unicode->str conversion; under Python 3 it produces bytes, which boto
    # does not accept as a volume id. Route parameters are already str.
    vol_ids = conn.get_all_volumes(volume_ids=vol_id)
    for vol in vol_ids:
        vol.delete()
    return redirect(url_for('ebs_volumes', region=region))
@app.route('/elastic_ips/<region>/')
def elastic_ips(region=None):
    """List elastic IPs in *region* that are not associated with an instance."""
    creds = config.get_ec2_conf()
    conn = connect_to_region(
        region,
        aws_access_key_id=creds['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    # Keep only addresses with no instance association.
    un_eli = [
        {'public_ip': addr.public_ip, 'domain': addr.domain}
        for addr in conn.get_all_addresses()
        if not addr.instance_id
    ]
    return render_template('elastic_ip.html', un_eli=un_eli, region=region)
@app.route('/elastic_ips/<region>/delete/<ip>')
def delete_elastic_ip(region=None, ip=None):
    """Release the elastic IP *ip* in *region*, then redirect back to the list."""
    creds = config.get_ec2_conf()
    conn = connect_to_region(
        region,
        aws_access_key_id=creds['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    # NOTE(review): the old `ip.encode('ascii')` was a Python 2 unicode->str
    # conversion; under Python 3 it produces bytes, which boto does not
    # accept as an address. Route parameters are already str.
    elis = conn.get_all_addresses(addresses=ip)
    for eli in elis:
        eli.release()
    return redirect(url_for('elastic_ips', region=region))
@app.route('/instance_events/<region>/')
def instance_events(region=None):
    """List scheduled maintenance events for instances in *region*."""
    creds = config.get_ec2_conf()
    conn = connect_to_region(
        region,
        aws_access_key_id=creds['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=creds['AWS_SECRET_ACCESS_KEY'])
    instance_event_list = []
    for instance in conn.get_all_instance_status():
        events = instance.events
        if events:
            # Only the first scheduled event per instance is surfaced,
            # matching the original behavior; hoist it to one local instead
            # of re-reading instance.events[0] four times.
            first = events[0]
            instance_event_list.append({
                'instance_id': instance.id,
                'event': first.code,
                'description': first.description,
                'event_before': first.not_before,
                'event_after': first.not_after,
            })
    return render_template('instance_events.html',
                           instance_event_list=instance_event_list)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (arbitrary code execution) — must not be enabled in production.
    app.debug = True
    # Binds on all interfaces so the dashboard is reachable from other hosts.
    app.run(host='0.0.0.0')
|
normal
|
{
"blob_id": "22c2425f1dc14b6b0005ebf2231af8abf43aa2e1",
"index": 5273,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\[email protected]('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)\n\n\[email 
protected]('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\[email protected]('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\[email protected]('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\[email protected]('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': 
instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\[email protected]('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)\n\n\[email 
protected]('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\[email protected]('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\[email protected]('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\[email protected]('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': 
instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\[email protected]('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, 
region=region)\n\n\[email protected]('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\[email protected]('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\[email protected]('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\[email protected]('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 
'event_before': instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-4": "from flask import Flask, flash, abort, redirect, url_for, request, render_template, make_response, json, Response\nimport os, sys\nimport config\nimport boto.ec2.elb\nimport boto\nfrom boto.ec2 import *\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\[email protected]('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == 
None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)\n\n\[email protected]('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\[email protected]('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\[email protected]('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\[email protected]('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in 
instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-5": null,
"step-ids": [
6,
7,
8,
9
]
}
|
[
6,
7,
8,
9
] |
# Packages
import PySimpleGUI as sg
import mysql.connector
import secrets
# TODO Add a view all button
# TODO Catch errors (specifically for TimeDate mismatches)
# TODO Add a downtime graph
# TODO Add a system feedback window instead of putting this in the out id textbox
# --- Module-level state shared by the window functions below ---
error_sel_flag = False # Flag to check whether an error has been selected before performing logic requiring it
guest_user_flag = False # Flag to check whether the user is a guest, and limit which functions of the application (and database) they can use
unresolved_errors = [] # Rows currently shown in the '-ERROR LIST-' listbox. MEEP, could probably do without this in the refactor
current_error = { # Dictionary to hold all information about the current/selected error. This removes the need to hit the database for every bit of logic that requires an error
	'fault_id': 'Null',
	'fault_status': 'Null',
	'fault_description': 'Null',
	'voyage': 'Null',
	'time_of_fault': 'Null',
	'time_of_solution': 'Null',
	'fault_type': 'Null',
	'location': 'Null',
	'sensor_id': 'Null',
	'sensor_type': 'Null',
	'fault_message': 'Null',
	'log_date': 'Null'
}
# Dictionary for search parameters, (re)filled on every DatabaseConnection.search() call.
# NOTE: deviation from script naming convention is due to the naming convention used in the database
search_dict = {
	'Voyage': '',
	'FaultStatus': '',
	'FaultType': '',
	'Location': '',
	'SensorID': '',
	'SensorType': '',
	'TimeOfFault': '',
	'TimeOfSolution': ''
}
class DatabaseConnection():
	''' This class instantiates and maintains the database connection, and encapsulates all functions that work directly with that connection.

	All statements that include user-supplied text are executed as
	parameterized queries (%s placeholders handled by mysql.connector), so
	GUI input cannot alter the SQL itself. The previous implementation
	interpolated raw strings with str.format(), which was vulnerable to SQL
	injection and broke on values containing quotes.'''

	def __init__(self, host, user, password, database):
		''' Open the MySQL connection and create the cursor used by the other
		methods of this class. Prints the error and exits the process if the
		connection cannot be established.'''
		try:
			self.connection = mysql.connector.connect(
				host=host,
				user=user,
				passwd=password,
				database=database,
				auth_plugin='mysql_native_password'
			)
			self.cursor = self.connection.cursor()
		except mysql.connector.Error as e:
			print("Error %d: %s" % (e.args[0], e.args[1]))
			exit(69)

	def save_to_errors(self, fault_status, fault_desciption, voyage, time_of_fault, time_of_solution, fault_type, location, sensor_id, sensor_type, fault_message):
		''' Insert one row into the 'errors' table.

		Blank time fields from the GUI are converted to Python None (sent as
		SQL NULL by the connector) to avoid a type mismatch with the database,
		and a blank status defaults to 'Unresolved'.'''
		if time_of_fault == '':
			time_of_fault = None  # connector sends None as SQL NULL
		if time_of_solution == '':
			time_of_solution = None
		if fault_status == '':
			fault_status = "Unresolved"
		insert_query = ("INSERT INTO errors (FaultDescription, FaultMessage, FaultStatus, FaultType, Location, "
						"SensorID, SensorType, TimeOfFault, TimeOfSolution, Voyage) "
						"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
		params = (fault_desciption, fault_message, fault_status, fault_type, location, sensor_id, sensor_type, time_of_fault, time_of_solution, voyage)
		print(insert_query)
		self.cursor.execute(insert_query, params)
		self.connection.commit()

	def save_to_downtime(self, voyage, stop_time, start_time, reason, assosciated_error):
		''' Insert one row into the 'downtime' table (parameterized, see class docstring).'''
		insert_query = ("INSERT INTO downtime (Voyage, StopTime, StartTime, Reason, AssosciatedError) "
						"VALUES (%s, %s, %s, %s, %s)")
		params = (voyage, stop_time, start_time, reason, assosciated_error)
		print(insert_query)
		self.cursor.execute(insert_query, params)
		self.connection.commit()

	def fetch(self, fetch_query):
		''' Run a caller-built 'SELECT' query and return all result rows.'''
		print("Fetch " + str(fetch_query))
		self.cursor.execute(fetch_query)
		return self.cursor.fetchall()

	def update(self, fault_status, fault_desciption, voyage, time_of_fault, time_of_solution, fault_type, location, sensor_id, sensor_type, fault_message, fault_id):
		''' Update every editable column of the 'errors' row identified by fault_id.

		The GUI renders missing datetimes as the string 'None'; those are
		converted back to Python None (SQL NULL) before the update.'''
		if time_of_fault == 'None':
			time_of_fault = None
		if time_of_solution == 'None':
			time_of_solution = None
		update_query = ("UPDATE errors SET FaultStatus = %s, FaultDescription = %s, Voyage = %s, "
						"TimeOfFault = %s, TimeOfSolution = %s, FaultType = %s, Location = %s, "
						"SensorID = %s, SensorType = %s, FaultMessage = %s WHERE FaultID = %s")
		params = (fault_status, fault_desciption, voyage, time_of_fault, time_of_solution, fault_type, location, sensor_id, sensor_type, fault_message, fault_id)
		print(update_query)
		self.cursor.execute(update_query, params)
		self.connection.commit()
		print("Updated")

	def search(self, voyage, status, fault_type, location, sensor_id, sensor_type, start_time, end_time):
		''' Build and run a filtered 'SELECT' over the 'errors' table and return the rows.

		Fills the module-level search_dict, drops empty criteria, and requires
		at least two remaining criteria (returns 0 otherwise). Column names
		come from the fixed search_dict keys, so only the values need to be
		parameterized.'''
		search_dict['Voyage'] = voyage
		search_dict['FaultStatus'] = status
		search_dict['FaultType'] = fault_type
		search_dict['Location'] = location
		search_dict['SensorID'] = sensor_id
		search_dict['SensorType'] = sensor_type
		search_dict['TimeOfFault'] = start_time
		search_dict['TimeOfSolution'] = end_time
		# Remove empty values so that only the required search parameters are included
		reduced_search_dict = dict((k, v) for k, v in search_dict.items() if v)
		if len(reduced_search_dict) < 2:
			print("Please enter at least two search criteria (sorry, Nic rushed this section!)")
			return 0
		conditions = " AND ".join("{} = %s".format(key) for key in reduced_search_dict)
		search_query = "SELECT * FROM errors WHERE " + conditions
		print(search_query)
		self.cursor.execute(search_query, tuple(reduced_search_dict.values()))
		return self.cursor.fetchall()

	def shutdown(self):
		''' Close the cursor and connection, ignoring errors from an already-closed link.'''
		try:
			self.cursor.close()
			self.connection.close()
		except mysql.connector.Error:
			pass
# Create window functions
def create_login_window():
	''' Show the login window and block until a login succeeds.

	A normal login connects with the credentials typed into the form; a guest
	login connects with the stored guest credentials and raises the
	module-level guest_user_flag so the rest of the application can restrict
	what the user may do. Returns the resulting DatabaseConnection instance.
	Closing the window terminates the program.
	'''
	global guest_user_flag

	# Window setup
	layout = [[sg.Text('Hostname: '), sg.In(size = (25, 0), key = '-HOST-')],
			  [sg.Text('Username: '), sg.In(size = (25, 0), key = '-USER-')],
			  [sg.Text('Password: '), sg.In(size = (25, 0), pad = (3, 0), password_char = '*', key='-PASS-')],
			  [sg.Button('Login', size = (14, 0), pad = ((0, 10), (5, 0)), enable_events = True, bind_return_key = True, key = '-LOGIN-'), sg.Button('Guest Login.', size = (14, 0), pad = ((10, 0), (5, 0)), enable_events = True, key = '-LOGIN GUEST-')]
			  ]

	window = sg.Window("LLMSDID - Login",
					   layout=layout,
					   margins=(20, 10),
					   grab_anywhere=True,
					   default_button_element_size=(12, 1)
					   )

	# Event loop: keep reading until a login succeeds or the window is closed.
	while True:
		event, vals = window.read()

		if event == '-LOGIN-':
			# Connect with the credentials the user typed in.
			connection = DatabaseConnection(vals['-HOST-'], vals['-USER-'], vals['-PASS-'], "LLMSDID")
			window.close()
			return connection
		elif event == '-LOGIN GUEST-':
			# Connect with the stored guest credentials and flag the session.
			connection = DatabaseConnection('localhost', secrets.guestUsername, secrets.guestPassword, "LLMSDID")
			guest_user_flag = True
			window.close()
			return connection
		elif event == sg.WIN_CLOSED:
			# User closed the window: nothing to return, terminate the program.
			window.close()
			exit(69)
def create_update_window(selected_error, database):
	''' Display a side-by-side "current vs. updated" window for one error record.

	selected_error: dictionary describing the selected error (same keys as the
	module-level current_error). database: the DatabaseConnection used to
	persist the edit. "Update" writes the edited fields back with
	database.update(); "Cancel" or closing the window discards them.
	'''
	# Left column: read-only display of the record's current values.
	update_col_1 = sg.Column([[sg.Frame('Current values', [[sg.Column([[sg.Text("Voyage: ", size=(12,1)), sg.Text(selected_error['voyage'])],
															 [sg.Text("Status: ", size=(12,1)), sg.Text(selected_error['fault_status'])],
															 [sg.Text("Description: ", size=(12,4)), sg.Multiline(selected_error['fault_description'], size=(40, 4))],
															 [sg.Text("Fault message: ", size=(12,2)), sg.Multiline(selected_error['fault_message'], size=(40,2))],
															 [sg.Text("Fault type: ", size=(12,1)), sg.Text(selected_error['fault_type'])],
															 [sg.Text("Fault location: ", size=(12,1)), sg.Text(selected_error['location'])],
															 [sg.Text("Sensor ID: ", size=(12,1)), sg.Text(selected_error['sensor_id'])],
															 [sg.Text("Sensor type: ", size=(12,1)), sg.Text(selected_error['sensor_type'])],
															 [sg.Text("From: ", size=(12,1)), sg.Text(selected_error['time_of_fault'])],
															 [sg.Text("To: ", size=(12,1)), sg.Text(selected_error['time_of_solution'])]],
															 )]])]])
	# Middle column: editable inputs, pre-filled with the current values.
	update_col_2 = sg.Column([[sg.Frame('Updated values', [[sg.Column([[sg.Text("Voyage: ", size=(12,1)), sg.In(selected_error['voyage'], size=(40,1), key='-NEW VOYAGE-')],
															 [sg.Text("Status: ", size=(12,1)), sg.InputCombo(["Unresolved", "Resolved"], default_value=selected_error['fault_status'], key='-NEW STATUS-')],
															 [sg.Text("Description: ", size=(12,4)), sg.Multiline(selected_error['fault_description'], size=(40,4), key='-NEW DESC-')],
															 [sg.Text("Fault message: ", size=(12,2)), sg.Multiline(selected_error['fault_message'], size=(40,2), key='-NEW MESSAGE-')],
															 [sg.Text("Fault type: ", size=(12,1)), sg.In(selected_error['fault_type'], size=(40,1), key='-NEW FTYPE-')],
															 [sg.Text("Fault location: ", size=(12,1)), sg.In(selected_error['location'], size=(40,1), key='-NEW LOC-')],
															 [sg.Text("Sensor ID: ", size=(12,1)), sg.In(selected_error['sensor_id'], size=(40,1), key='-NEW ID-')],
															 [sg.Text("Sensor type: ", size=(12,1)), sg.In(selected_error['sensor_type'], size=(40,1), key='-NEW STYPE-')],
															 [sg.Text("From: ", size=(12,1)), sg.In(selected_error['time_of_fault'], size=(40,1), key='-NEW FROM-')],
															 [sg.Text("To: ", size=(12,1)), sg.In(selected_error['time_of_solution'], size=(40,1), key='-NEW TO-')]],
															)]])]])
	# Bottom column: commit / cancel buttons.
	update_col_3 = sg.Column([[sg.Frame('Actions', [[sg.Column([[sg.Button("Update", enable_events=True,
																	tooltip="Press me if you'd like to update this fault.",
																	key='-SAVE UPDATE-'),
														 sg.Button("Cancel", enable_events=True,
																	tooltip="Press me if you'd like to cancel this update.",
																	key='-CANCEL UPDATE-')]])]])]])

	updateLayout = [[update_col_1, update_col_2], [update_col_3]]

	update_window = sg.Window("LLMSDID - Update",
							layout=updateLayout,
							margins=(200, 100),
							grab_anywhere=True,
							default_button_element_size=(12, 1)
							)

	print("Updating " + str(selected_error['fault_id']))
	# Event loop: persist on save, discard on cancel/close.
	while True:
		update_event, update_value = update_window.read()

		if update_event == '-SAVE UPDATE-':
			database.update(update_value['-NEW STATUS-'], update_value['-NEW DESC-'], update_value['-NEW VOYAGE-'], update_value['-NEW FROM-'], update_value['-NEW TO-'], update_value['-NEW FTYPE-'], update_value['-NEW LOC-'], update_value['-NEW ID-'], update_value['-NEW STYPE-'], update_value['-NEW MESSAGE-'], selected_error['fault_id'])
			update_window.close()
			break

		# If the user closes the window, exit this loop so that the program can close
		if update_event == sg.WIN_CLOSED or update_event == '-CANCEL UPDATE-':
			update_window.close()
			break
def create_log_window(database):
	''' Display the "log a new error" form and save the entry on request.

	database: the DatabaseConnection used to insert the new row. "Save"
	writes the form fields via database.save_to_errors(); "Cancel" or
	closing the window discards them.
	'''
	log_layout = [
		[sg.Text("Fault description", size=(12,1)), sg.In(size=(40, 40), key='-DESCRIPTION-')],
		[sg.Text("Fault message", size=(12,1)), sg.In(size=(40, 40), key='-MESSAGE-')],
		[sg.Text("Status", size=(12,1)), sg.InputCombo(["Unresolved", "Resolved"], key='-STATUS-')],
		[sg.Text("Fault type", size=(12,1)), sg.In(size = (25, 1), key='-TYPE-')],
		[sg.Text("Location", size=(12,1)), sg.In(size=(25, 1), key='-LOCATION-')],
		[sg.Text("Sensor ID", size=(12,1)), sg.In(size=(25, 1), key='-SENSOR ID-')],
		[sg.Text("Sensor type", size=(12,1)), sg.In(size=(25, 1), key='-SENSOR TYPE-')],
		[sg.Text("Time of fault", tooltip = "dd-mm-yy hh:mm:ss", size=(12,1)), sg.In(size=(25, 1), key='-START-')],
		[sg.Text("Time of solution", tooltip = "dd-mm-yy hh:mm:ss", size=(12,1)), sg.In(size=(25, 1), key='-END-')],
		[sg.Text("Voyage", size=(12,1)), sg.In(size=(25, 1), key='-VOYAGE-')],
		[sg.Button("Save", enable_events=True, key='-LOG SAVE-'), sg.Button("Cancel", enable_events=True, key='-LOG CANCEL-')]
	]

	log_window = sg.Window("LLMSDID - Log an error",
							layout=log_layout,
							margins=(200, 100),
							grab_anywhere=True,
							default_button_element_size=(12, 1)
							)

	# Event loop: insert the row on save, discard on cancel/close.
	while True:
		log_event, log_values = log_window.read()

		if log_event == '-LOG SAVE-':
			database.save_to_errors(log_values['-STATUS-'], log_values['-DESCRIPTION-'], log_values['-VOYAGE-'], log_values['-START-'], log_values['-END-'], log_values['-TYPE-'], log_values['-LOCATION-'], log_values['-SENSOR ID-'], log_values['-SENSOR TYPE-'], log_values['-MESSAGE-'])
			log_window.close()
			break

		# If the user closes the window, exit this loop so that the program can close
		if log_event == sg.WIN_CLOSED or log_event == '-LOG CANCEL-':
			log_window.close()
			break
def create_more_window(selected_error, database):
	''' Display a read-only window showing every field of the selected error.

	selected_error: dictionary describing the selected error (same keys as the
	module-level current_error). database is accepted for signature
	consistency with the other window functions but is not used here.
	NOTE(review): the previous revision also built an unused 'Parameter'
	column (more_col_1) that never appeared in the layout; that dead code
	has been removed.
	'''
	# Single column listing each field label next to its value.
	more_col_2 = sg.Column([[sg.Frame('Value', [[sg.Column([[sg.Text("Fault ID: ", size=(12,1)), sg.Text(selected_error['fault_id'], size=(40,1))],
												 [sg.Text("Voyage: ", size=(12,1)), sg.Text(selected_error['voyage'], size=(40,1))],
												 [sg.Text("Status: ", size=(12,1)), sg.Text(selected_error['fault_status'], size=(40,1))],
												 [sg.Text("Description: ", size=(12,4)), sg.Multiline(selected_error['fault_description'], size=(40,4))],
												 [sg.Text("Fault message: ", size=(12,2)), sg.Multiline(selected_error['fault_message'], size=(40,2))],
												 [sg.Text("Fault type: ", size=(12,1)), sg.Text(selected_error['fault_type'], size=(40,1))],
												 [sg.Text("Fault location: ", size=(12,1)), sg.Text(selected_error['location'], size=(40,1))],
												 [sg.Text("Sensor ID: ", size=(12,1)), sg.Text(selected_error['sensor_id'], size=(40,1))],
												 [sg.Text("Sensor type: ", size=(12,1)), sg.Text(selected_error['sensor_type'], size=(40,1))],
												 [sg.Text("From: ", size=(12,1)), sg.Text(selected_error['time_of_fault'], size=(40,1))],
												 [sg.Text("To: ", size=(12,1)), sg.Text(selected_error['time_of_solution'], size=(40,1))],
												 [sg.Text("Log date: ", size=(12,1)), sg.Text(selected_error['log_date'], size=(40,1))]],
												 )]])]])
	# Bottom column: single dismiss button.
	more_col_3 = sg.Column([[sg.Frame('Actions', [[sg.Column([[sg.Button("Thanks", enable_events=True,
																tooltip="Press me if you're done having a look.",
																key='-THANKS-')
																]])]])]])

	moreLayout = [[more_col_2], [more_col_3]]

	more_window = sg.Window("LLMSDID - More",
							layout=moreLayout,
							margins=(200, 100),
							grab_anywhere=True,
							default_button_element_size=(12, 1)
							)

	# Event loop: this window is read-only, so just wait for dismissal.
	while True:
		more_event, more_value = more_window.read()

		# If the user closes the window, exit this loop so that the program can close
		if more_event == sg.WIN_CLOSED or more_event == '-THANKS-':
			more_window.close()
			break
def create_downtime_window(database):
	''' Display the "log some downtime" form and save the entry on request.

	database: the DatabaseConnection used to insert the new row. "Save"
	writes the form fields via database.save_to_downtime(); "Cancel" or
	closing the window discards them.
	'''
	downtime_layout = [
		[sg.Text("Voyage"), sg.In(size=(40, 40), key='-VOYAGE-')],
		[sg.Text("System Stop Time"), sg.In(size=(40, 40), key='-STOP-')],
		[sg.Text("System Restart Time", tooltip = "dd-mm-yy hh:mm:ss"), sg.In(size=(40, 40), key='-START-')],
		[sg.Text("Reason for Downtime", tooltip = "dd-mm-yy hh:mm:ss"), sg.In(size=(25, 1), key='-REASON-')],
		[sg.Text("Assosciated Error"), sg.In(size=(25, 1), key='-ASSOSCIATED ERROR-')],
		[sg.Button("Save", enable_events=True, key='-LOG SAVE-'),
		 sg.Button("Cancel", enable_events=True, key='-LOG CANCEL-')]
	]

	downtime_window = sg.Window("LLMSDID - Log some downtime",
							layout=downtime_layout,
							margins=(200, 100),
							grab_anywhere=True,
							default_button_element_size=(12, 1)
							)

	# Event loop: insert the row on save, discard on cancel/close.
	while True:
		downtime_event, downtime_values = downtime_window.read()

		if downtime_event == '-LOG SAVE-':
			database.save_to_downtime(downtime_values['-VOYAGE-'], downtime_values['-STOP-'], downtime_values['-START-'], downtime_values['-REASON-'], downtime_values['-ASSOSCIATED ERROR-'])
			downtime_window.close()
			break

		# If the user closes the window, exit this loop so that the program can close
		if downtime_event == sg.WIN_CLOSED or downtime_event == '-LOG CANCEL-':
			downtime_window.close()
			break
# Main window layout
# Column 1: advanced-search inputs, one per searchable database column.
main_column_1 = sg.Column([[sg.Frame('Advanced search', [[sg.Column([[sg.Text("Voyage: ", tooltip = "Let me know which voyage you'd like to see the errors for."), sg.In(size = (15, 1), pad = ((34, 0), (0, 0)), key = '-VOYAGE SEARCH-')],
												[sg.Text("Status: ", tooltip = "Would you like to look at errors we've already solved? Let me know here!"), sg.In(size = (15, 1), pad = ((40, 0), (0, 0)), key = '-STATUS SEARCH-')],
												[sg.Text("Fault type: ", tooltip = "Here you can let me know what type of fault you'd like to search for."), sg.In(size = (15, 1), pad = ((20, 0), (0, 0)), right_click_menu = ("Cable", "Hardware", "Sensor", "Connector"), key = '-TYPE SEARCH-')],
												[sg.Text("Fault location: ", tooltip = "If you suspect that your fault might be location-specific, say so here to see previous errors that have occurred in that location."), sg.In(size = (15, 1), pad = ((0, 0), (0, 0)), key = '-LOCATION SEARCH-')],
												[sg.Text("Sensor ID: ", tooltip = "Think that your error could be sensor-specific? Find previous issues with your exact sensor by entering it's asset number here."), sg.In(size = (15, 1), pad = ((21, 0), (0, 0)), key = '-SENSOR ID SEARCH-')],
												[sg.Text("Sensor type: ", tooltip = "Search for previous errors that have been encountered with your specific type of sensor."), sg.In(size = (15, 1), pad = ((8, 0), (0, 0)), key = '-SENSOR TYPE SEARCH-')],
												[sg.Text("From: ", tooltip = "Enter the start date for your search."), sg.In(size = (15, 1), tooltip = "dd-mm-yy hh:mm:ss", pad = ((48, 0), (0, 0)), key = '-FROM SEARCH-')],
												[sg.Text("To: ", tooltip = "Enter the end date for your search."), sg.In(size = (15, 1), tooltip = "dd-mm-yy hh:mm:ss", pad = ((64, 0), (0, 0)), key = '-TO SEARCH-')],
												[sg.Button("Search errors", size = (12, 1), pad = ((93, 0), (7, 0)), enable_events=True, tooltip = "Press me if you'd like to search for specific error characteristics.",key = '-SEARCH ERROR-')]], pad = (3, 3))]])]])
# Column 2: fault listbox plus a summary pane for the selected fault.
main_column_2 = sg.Column([[sg.Frame('Faults:', [[sg.Column([[sg.Listbox(unresolved_errors, enable_events = True, size=(20, len(unresolved_errors)), key = '-ERROR LIST-')]]),
									   sg.Column([[sg.Text("Error ID: ", size=(14,1)), sg.Text("", size=(20,1), key='-OUT ID-')],
												  [sg.Text("Error Description: ", size=(14,15)), sg.Multiline("", size=(20,15), key='-OUT DESC-')],
												  ]) ],
									   [sg.Button("Update", enable_events = True, tooltip = "Press me if you'd like to update some of the information about the selected error.", key = '-UPDATE ERROR-'),
										sg.Button("Give me more!", enable_events = True, tooltip = "Press me if you'd like to view all the information about this specific error.", key = '-SHOW ME MORE-'),
										sg.Button("Show me unresolved errors", enable_events = True, tooltip="Press me if you'd like to see all the unresolved errors", key = '-UNRESOLVED-')]], pad=(0, 0))]])
# Column 3: actions that open the logging sub-windows.
main_column_3 = sg.Column([[sg.Frame('Actions', [[sg.Column([[sg.Button("Log a new error", enable_events=True, tooltip = "Press me if you'd like to log a new error.", key = '-LOG ERROR-'),
												   sg.Button("Log some downtime", enable_events=True, tooltip="Press me if you'd like to log system downtime as a result of a logged error.", key='-LOG DOWNTIME-')]])]])]])

main_layout = [[main_column_1, main_column_2], [main_column_3]]

# The main application window, read by the event loop in the __main__ block.
main_window = sg.Window("LLMSDID - Home",
						layout = main_layout,
						margins = (200, 100),
						grab_anywhere=True,
						default_button_element_size=(12, 1))
if __name__ == "__main__":
	# Authenticate first; this also opens the database connection used below.
	db_object = create_login_window()

	# Main event loop: dispatch each GUI event to its handler.
	while True:
		event, values = main_window.read()

		# Check for window close FIRST: on close, 'values' can be None, and
		# the values['-ERROR LIST-'] lookup below would then raise a TypeError
		# before the original bottom-of-loop close check was ever reached.
		if event == sg.WIN_CLOSED:
			break

		if event == '-UNRESOLVED-':
			# Refresh the listbox with every error still marked unresolved.
			update_query = "SELECT FaultID, FaultDescription FROM errors WHERE FaultStatus = 'Unresolved'"
			unresolved_errors = db_object.fetch(update_query)
			main_window['-ERROR LIST-'].update(unresolved_errors)
			main_window.refresh()

		if values['-ERROR LIST-']:
			# A row is highlighted in the listbox: fetch the full record and
			# cache it in current_error so later handlers don't hit the DB.
			selected_error = values['-ERROR LIST-'][0]
			error_sel_flag = True
			fetch_query = "SELECT * FROM errors WHERE FaultId = " + str(selected_error[0])
			current_error_list = db_object.fetch(fetch_query)
			current_error['fault_id'] = current_error_list[0][0]
			current_error['fault_status'] = current_error_list[0][1]
			current_error['fault_description'] = current_error_list[0][2]
			current_error['voyage'] = current_error_list[0][3]
			current_error['time_of_fault'] = current_error_list[0][4]
			current_error['time_of_solution'] = current_error_list[0][5]
			current_error['fault_type'] = current_error_list[0][6]
			current_error['location'] = current_error_list[0][7]
			current_error['sensor_id'] = current_error_list[0][8]
			current_error['sensor_type'] = current_error_list[0][9]
			current_error['fault_message'] = current_error_list[0][10]
			current_error['log_date'] = current_error_list[0][11]
			main_window['-OUT ID-'].update(current_error['fault_id'])
			main_window['-OUT DESC-'].update(current_error['fault_description'])

		if event == '-UPDATE ERROR-':
			if guest_user_flag:
				print("User does not have privileges to update issues")
			else:
				if error_sel_flag:
					create_update_window(current_error, db_object)	# MEEP: point to db_object?
				else:
					main_window['-OUT ID-'].update("Please select a fault for us to update.")
					print("No fault selected")

		if event == '-LOG ERROR-':
			if guest_user_flag:
				print("User does not have privileges to log an error")
			else:
				create_log_window(db_object)
				# TODO Set current issue as logged issue if it is unresolved

		if event == '-SEARCH ERROR-':
			unresolved_errors = db_object.search(values['-VOYAGE SEARCH-'], values['-STATUS SEARCH-'], values['-TYPE SEARCH-'], values['-LOCATION SEARCH-'], values['-SENSOR ID SEARCH-'], values['-SENSOR TYPE SEARCH-'], values['-FROM SEARCH-'], values['-TO SEARCH-'])
			main_window['-ERROR LIST-'].update(unresolved_errors)
			main_window.refresh()

		if event == '-SHOW ME MORE-':
			if error_sel_flag:
				create_more_window(current_error, db_object)
			else:
				main_window['-OUT ID-'].update("Please select a fault for us to have a look at.")
				print("No fault selected")

		if event == '-LOG DOWNTIME-':
			if guest_user_flag:
				print("User does not have privileges to log downtime")
			else:
				create_downtime_window(db_object)
|
normal
|
{
"blob_id": "8fb5ef7244a8ca057f11cbcdf42d383665dade5e",
"index": 6884,
"step-1": "# Packages\nimport PySimpleGUI as sg\nimport mysql.connector\nimport secrets\n\n# TODO Add a view all button\n# TODO Catch errors (specifically for TimeDate mismatches)\n# TODO Add a downtime graph\n# TODO Add a system feedback window instead of putting this in the out id textbox\n\nerror_sel_flag = False\t# Flag to check whether an error has been selected before performing logic requiring it\nguest_user_flag = False\t# Flag to check whether the user is a guest, and limit which functions of the applciation (and database) they can use\nunresolved_errors = [] # MEEP, could probably do without this in the refactor\ncurrent_error = {\t# Dictionary to hold all information about the current/selected error. This removes the need to hit the database for every bit of logic that requires an error\n\t'fault_id': 'Null',\n\t'fault_status': 'Null',\n\t'fault_description': 'Null',\n\t'voyage': 'Null',\n\t'time_of_fault': 'Null',\n\t'time_of_solution': 'Null',\n\t'fault_type': 'Null',\n\t'location': 'Null',\n\t'sensor_id': 'Null',\n\t'sensor_type': 'Null',\n\t'fault_message': 'Null',\n\t'log_date': 'Null'\n}\t\n\n# Dictionary for search parameters. NOTE: deviation from script naming convention is due to the naming convention used in the database\nsearch_dict = {\n\t'Voyage': '',\n\t'FaultStatus': '',\n\t'FaultType': '',\n\t'Location': '',\n\t'SensorID': '',\n\t'SensorType': '',\n\t'TimeOfFault': '',\n\t'TimeOfSolution': ''\n}\n\nclass DatabaseConnection():\n\t''' This class instantiates and maintains the database connection, and encapsulates all functions that work directly with that connection.'''\n\t\n\tdef __init__(self, host, user, password, database):\n\t\t''' This function is called whenever a new instance of 'DatabaseConnection' is instantiated. 
It created the connection and cursor to the \n\t\tdatabase, both of which are used by other functions of this class.'''\n\t\ttry:\n\t\t\tself.connection = mysql.connector.connect(\n\t\t\t\thost=host,\n\t\t\t\tuser=user,\n\t\t\t\tpasswd=password,\n\t\t\t\tdatabase=database,\n\t\t\t\tauth_plugin='mysql_native_password'\n\t\t\t)\n\t\t\tself.cursor = self.connection.cursor()\n\t\texcept mysql.connector.Error as e:\n\t\t\tprint(\"Error %d: %s\" % (e.args[0], e.args[1]))\n\t\t\texit(69)\n\n\tdef save_to_errors(self, fault_status, fault_desciption, voyage, time_of_fault, time_of_solution, fault_type, location, sensor_id, sensor_type, fault_message):\n\t\t''' This function creates and carries out an 'INSERT' query for the 'errors' table. It forces null values for the time fields in the case that the GUI \n\t\treturns blank values, this is to avoid a type mismatch with the database (This could probably be better handled somewhere else but it gets the job done for now).'''\n\t\t\n\t\tif time_of_fault == '':\n\t\t\ttime_of_fault = \"NULL\"\n\n\t\tif time_of_solution == '':\n\t\t\ttime_of_solution = \"NULL\"\n\n\t\tif fault_status == '':\n\t\t\tfault_status = \"Unresolved\"\n\n\t\tinsert_query = \"INSERT INTO errors (FaultDescription, FaultMessage, FaultStatus, FaultType, Location, SensorID, SensorType, TimeOfFault, TimeOfSolution, Voyage) VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', {}, {}, '{}')\".format(fault_desciption, fault_message, fault_status, fault_type, location, sensor_id, sensor_type, time_of_fault, time_of_solution, voyage)\n\t\tprint(insert_query)\n\n\t\tself.cursor.execute(insert_query)\n\n\t\tself.connection.commit()\n\n\tdef save_to_downtime(self, voyage, stop_time, start_time, reason, assosciated_error):\n\t\t''' This function creates and carries out an 'INSERT' query for the 'downtime' table. 
It forces null values for the time fields in the case that the GUI \n\t\treturns blank values, this is to avoid a type mismatch with the database (Again, this is not perfect but I'll relook it at a later stage).'''\n\t\t\n\t\tinsert_query = \"INSERT INTO downtime (Voyage, StopTime, StartTime, Reason, AssosciatedError) VALUES ('{}', '{}', '{}', '{}', '{}')\".format(voyage, stop_time, start_time, reason, assosciated_error)\n\t\tprint(insert_query)\n\n\t\tself.cursor.execute(insert_query)\n\n\t\tself.connection.commit()\n\t\tpass\n\n\tdef fetch(self, fetch_query):\n\t\t''' This function carries out a 'SELECT' query from the MySQL database and returns the result.'''\n\t\tprint(\"Fetch \" + str(fetch_query))\n\n\t\t_ = self.cursor.execute(fetch_query)\n\t\tresult = self.cursor.fetchall()\n\n\t\treturn result\n\n\tdef update(self, fault_status, fault_desciption, voyage, time_of_fault, time_of_solution, fault_type, location, sensor_id, sensor_type, fault_message, fault_id):\n\t\t# ToDo Test the robustness of this, seems like it doens't like updating with unchanged fields\n\t\tif time_of_fault == 'None':\n\t\t\ttime_of_fault = \"NULL\"\n\n\t\tif time_of_solution =='None':\n\t\t\ttime_of_solution = \"NULL\"\n\n\t\tupdate_query = \"UPDATE errors SET FaultStatus = '{}', FaultDescription = '{}', Voyage = '{}', TimeOfFault = {}, TimeOfSolution = {}, FaultType = '{}', Location = '{}', SensorID = '{}', SensorType = '{}', FaultMessage = '{}' WHERE FaultID = {}\".format(fault_status, fault_desciption, voyage, time_of_fault, time_of_solution, fault_type, location, sensor_id, sensor_type, fault_message, fault_id)\n\n\t\tprint(update_query)\n\t\tself.cursor.execute(update_query)\n\n\t\tself.connection.commit()\n\n\t\tprint(\"Updated\")\n\n\tdef search(self, voyage, status, fault_type, location, sensor_id, sensor_type, start_time, end_time):\n\t\t''' This function creates and carries out a 'SELECT' query from the MySQL database and returns the result.\n\t\tIt fills a dictionary and 
reduces it to only include the provided search terms in the query.'''\n\n\t\tsearch_dict['Voyage'] = voyage\n\t\tsearch_dict['FaultStatus'] = status\n\t\tsearch_dict['FaultType'] = fault_type\n\t\tsearch_dict['Location'] = location\n\t\tsearch_dict['SensorID'] = sensor_id\n\t\tsearch_dict['SensorType'] = sensor_type\n\t\tsearch_dict['TimeOfFault'] = start_time\n\t\tsearch_dict['TimeOfSolution'] = end_time\n\n\t\t# Remove empty values so that only the required search parameters are included\n\t\treduced_search_dict = dict((k, v) for k, v in search_dict.items() if v) # New dictionary with all empty values removed\n\t\tif(len(reduced_search_dict) < 2):\n\t\t\tprint(\"Please enter at least two search criteria (sorry, Nic rushed this section!)\")\n\t\t\treturn 0\n\t\tkey_list = list(reduced_search_dict.keys())\n\t\tvalue_list = list(reduced_search_dict.values())\n\n\t\t# Remove enclosing apostrophes as is required in the MySQL syntax \n\t\tkey_tuple = tuple(key_list)\n\t\tseperator = \", \"\n\t\tusable_key_tuple = seperator.join(key_tuple)\n\n\t\tsearch_query = \"SELECT * FROM errors WHERE ({}) = {}\".format(usable_key_tuple, str(tuple(value_list)))\n\t\tprint(search_query)\n\n\t\t_ = self.cursor.execute(search_query)\n\t\tresult = self.cursor.fetchall()\n\n\t\treturn result\n\t\n\tdef shutdown(self):\n\t\t# Implement logic to close connection\n\t\tpass\n\t\n# Create window functions\ndef create_login_window():\n\t''' This function contains the layout for, invokes, and monitors the login window. When a user logs in, it creates an instance of \n\tthe 'DatabaseConnection' class, establishing a connection to the database for use by the main application. 
This function returns the \n\tcreated instance of 'DatabaseConnection' for use by other functions in the script.\n\t'''\n\n\t# Window setup\n\tlogin_layout = [[sg.Text('Hostname: '), sg.In(size = (25, 0), key = '-HOST-')],\n\t\t\t\t\t[sg.Text('Username: '), sg.In(size = (25, 0), key = '-USER-')],\n\t\t\t\t [sg.Text('Password: '), sg.In(size = (25, 0), pad = (3, 0), password_char = '*', key='-PASS-')],\n\t\t\t\t [sg.Button('Login', size = (14, 0), pad = ((0, 10), (5, 0)), enable_events = True, bind_return_key = True, key = '-LOGIN-'), sg.Button('Guest Login.', size = (14, 0), pad = ((10, 0), (5, 0)), enable_events = True, key = '-LOGIN GUEST-')]\n\t\t\t\t ]\n\n\tlogin_window = sg.Window(\"LLMSDID - Login\",\n\t\t\t\t\t\t\tlayout=login_layout,\n\t\t\t\t\t\t\tmargins=(20, 10),\n\t\t\t\t\t\t\tgrab_anywhere=True,\n\t\t\t\t\t\t\tdefault_button_element_size=(12, 1)\n\t\t\t\t\t\t\t)\n\n\t# Logic\n\twhile True:\n\t\tlogin_event, login_values = login_window.read()\n\n\t\tif login_event == '-LOGIN-':\n\t\t\tcurrent_db = DatabaseConnection(login_values['-HOST-'], login_values['-USER-'], login_values['-PASS-'], \"LLMSDID\")\t# Instantiate instance of 'DatabaseConnection'\n\t\t\tlogin_window.close()\n\t\t\treturn current_db\n\n\t\tif login_event == '-LOGIN GUEST-':\n\t\t\tcurrent_db = DatabaseConnection('localhost', secrets.guestUsername, secrets.guestPassword, \"LLMSDID\")\t# Instantiate instance of 'DatabaseConnection'\n\t\t\tglobal guest_user_flag\n\t\t\tguest_user_flag = True\n\t\t\tlogin_window.close()\n\t\t\treturn current_db\n\n\t\t# If the user closes the window, exit this loop so that the program can close\n\t\tif login_event == sg.WIN_CLOSED:\n\t\t\tlogin_window.close()\n\t\t\texit(69)\n\ndef create_update_window(selected_error, database):\n\tupdate_col_1 = sg.Column([[sg.Frame('Current values', [[sg.Column([[sg.Text(\"Voyage: \", size=(12,1)), sg.Text(selected_error['voyage'])],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Status: \", size=(12,1)), 
sg.Text(selected_error['fault_status'])],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Description: \", size=(12,4)), sg.Multiline(selected_error['fault_description'], size=(40, 4))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault message: \", size=(12,2)), sg.Multiline(selected_error['fault_message'], size=(40,2))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault type: \", size=(12,1)), sg.Text(selected_error['fault_type'])],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault location: \", size=(12,1)), sg.Text(selected_error['location'])],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Sensor ID: \", size=(12,1)), sg.Text(selected_error['sensor_id'])],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Sensor type: \", size=(12,1)), sg.Text(selected_error['sensor_type'])],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"From: \", size=(12,1)), sg.Text(selected_error['time_of_fault'])],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"To: \", size=(12,1)), sg.Text(selected_error['time_of_solution'])]],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t )]])]])\n\n\tupdate_col_2 = sg.Column([[sg.Frame('Updated values', [[sg.Column([[sg.Text(\"Voyage: \", size=(12,1)), sg.In(selected_error['voyage'], size=(40,1), key='-NEW VOYAGE-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Status: \", size=(12,1)), sg.InputCombo([\"Unresolved\", \"Resolved\"], default_value=selected_error['fault_status'], key='-NEW STATUS-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Description: \", size=(12,4)), sg.Multiline(selected_error['fault_description'], size=(40,4), key='-NEW DESC-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault message: \", size=(12,2)), sg.Multiline(selected_error['fault_message'], size=(40,2), key='-NEW MESSAGE-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault type: \", size=(12,1)), sg.In(selected_error['fault_type'], size=(40,1), key='-NEW FTYPE-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault location: \", size=(12,1)), 
sg.In(selected_error['location'], size=(40,1), key='-NEW LOC-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Sensor ID: \", size=(12,1)), sg.In(selected_error['sensor_id'], size=(40,1), key='-NEW ID-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Sensor type: \", size=(12,1)), sg.In(selected_error['sensor_type'], size=(40,1), key='-NEW STYPE-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"From: \", size=(12,1)), sg.In(selected_error['time_of_fault'], size=(40,1), key='-NEW FROM-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"To: \", size=(12,1)), sg.In(selected_error['time_of_solution'], size=(40,1), key='-NEW TO-')]],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t)]])]])\n\n\tupdate_col_3 = sg.Column([[sg.Frame('Actions', [[sg.Column([[sg.Button(\"Update\", enable_events=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttooltip=\"Press me if you'd like to update this fault.\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tkey='-SAVE UPDATE-'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t sg.Button(\"Cancel\", enable_events=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttooltip=\"Press me if you'd like to cancel this update.\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tkey='-CANCEL UPDATE-')]])]])]])\n\n\tupdateLayout = [[update_col_1, update_col_2], [update_col_3]]\n\n\tupdate_window = sg.Window(\"LLMSDID - Update\",\n\t\t\t\t\t\t\tlayout=updateLayout,\n\t\t\t\t\t\t\tmargins=(200, 100),\n\t\t\t\t\t\t\tgrab_anywhere=True,\n\t\t\t\t\t\t\tdefault_button_element_size=(12, 1)\n\t\t\t\t\t\t\t)\n\n\tprint(\"Updating \" + str(selected_error['fault_id']))\n\twhile True:\n\t\tupdate_event, update_value = update_window.read()\n\n\t\tif update_event == '-SAVE UPDATE-':\n\t\t\tdatabase.update(update_value['-NEW STATUS-'], update_value['-NEW DESC-'], update_value['-NEW VOYAGE-'], update_value['-NEW FROM-'], update_value['-NEW TO-'], update_value['-NEW FTYPE-'], update_value['-NEW LOC-'], update_value['-NEW ID-'], update_value['-NEW STYPE-'], update_value['-NEW MESSAGE-'], 
selected_error['fault_id'])\n\t\t\tupdate_window.close()\n\t\t\tbreak\n\n\t\t# If the user closes the window, exit this loop so that the program can close\n\t\tif update_event == sg.WIN_CLOSED or update_event == '-CANCEL UPDATE-':\n\t\t\tupdate_window.close()\n\t\t\tbreak\n\ndef create_log_window(database):\n\tlog_layout = [\n\t\t[sg.Text(\"Fault description\", size=(12,1)), sg.In(size=(40, 40), key='-DESCRIPTION-')],\n\t\t[sg.Text(\"Fault message\", size=(12,1)), sg.In(size=(40, 40), key='-MESSAGE-')],\n\t\t[sg.Text(\"Status\", size=(12,1)), sg.InputCombo([\"Unresolved\", \"Resolved\"], key='-STATUS-')],\n\t\t[sg.Text(\"Fault type\", size=(12,1)), sg.In(size = (25, 1), key='-TYPE-')],\n\t\t[sg.Text(\"Location\", size=(12,1)), sg.In(size=(25, 1), key='-LOCATION-')],\n\t\t[sg.Text(\"Sensor ID\", size=(12,1)), sg.In(size=(25, 1), key='-SENSOR ID-')],\n\t\t[sg.Text(\"Sensor type\", size=(12,1)), sg.In(size=(25, 1), key='-SENSOR TYPE-')],\n\t\t[sg.Text(\"Time of fault\", tooltip = \"dd-mm-yy hh:mm:ss\", size=(12,1)), sg.In(size=(25, 1), key='-START-')],\n\t\t[sg.Text(\"Time of solution\", tooltip = \"dd-mm-yy hh:mm:ss\", size=(12,1)), sg.In(size=(25, 1), key='-END-')],\n\t\t[sg.Text(\"Voyage\", size=(12,1)), sg.In(size=(25, 1), key='-VOYAGE-')],\n\t\t[sg.Button(\"Save\", enable_events=True, key='-LOG SAVE-'), sg.Button(\"Cancel\", enable_events=True, key='-LOG CANCEL-')]\n\t]\n\n\tlog_window = sg.Window(\"LLMSDID - Log an error\",\n\t\t\t\t\t\t\tlayout=log_layout,\n\t\t\t\t\t\t\tmargins=(200, 100),\n\t\t\t\t\t\t\tgrab_anywhere=True,\n\t\t\t\t\t\t\tdefault_button_element_size=(12, 1)\n\t\t\t\t\t\t\t)\n\n\twhile True:\n\t\tlog_event, log_values = log_window.read()\n\t\t\n\t\tif log_event == '-LOG SAVE-':\n\t\t\tdatabase.save_to_errors(log_values['-STATUS-'], log_values['-DESCRIPTION-'], log_values['-VOYAGE-'], log_values['-START-'], log_values['-END-'], log_values['-TYPE-'], log_values['-LOCATION-'], log_values['-SENSOR ID-'], log_values['-SENSOR TYPE-'], 
log_values['-MESSAGE-'])\n\t\t\tlog_window.close()\n\t\t\tbreak\n\n\t\t# If the user closes the window, exit this loop so that the program can close\n\t\tif log_event == sg.WIN_CLOSED or log_event == '-LOG CANCEL-':\n\t\t\tlog_window.close()\n\t\t\tbreak\n\ndef create_more_window(selected_error, database):\n\tmore_col_1 = sg.Column([[sg.Frame('Parameter', [[sg.Column([[sg.Text(\"Fault ID: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t[sg.Text(\"Voyage: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t[sg.Text(\"Status: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"Description: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"Fault message: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"Fault type: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t[sg.Text(\"Fault location: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t[sg.Text(\"Sensor ID: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t[sg.Text(\"Sensor type: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t[sg.Text(\"From: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t[sg.Text(\"To: \")],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t[sg.Text(\"Log date: \")]],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t )]])]])\n\n\tmore_col_2 = sg.Column([[sg.Frame('Value', [[sg.Column([[sg.Text(\"Fault ID: \", size=(12,1)), sg.Text(selected_error['fault_id'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Voyage: \", size=(12,1)), sg.Text(selected_error['voyage'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Status: \", size=(12,1)), sg.Text(selected_error['fault_status'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Description: \", size=(12,4)), sg.Multiline(selected_error['fault_description'], size=(40,4))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault message: \", size=(12,2)), sg.Multiline(selected_error['fault_message'], size=(40,2))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault type: \", size=(12,1)), sg.Text(selected_error['fault_type'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Fault location: \", size=(12,1)), 
sg.Text(selected_error['location'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Sensor ID: \", size=(12,1)), sg.Text(selected_error['sensor_id'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Sensor type: \", size=(12,1)), sg.Text(selected_error['sensor_type'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"From: \", size=(12,1)), sg.Text(selected_error['time_of_fault'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"To: \", size=(12,1)), sg.Text(selected_error['time_of_solution'], size=(40,1))],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Log date: \", size=(12,1)), sg.Text(selected_error['log_date'], size=(40,1))]],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t )]])]])\n\n\tmore_col_3 = sg.Column([[sg.Frame('Actions', [[sg.Column([[sg.Button(\"Thanks\", enable_events=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttooltip=\"Press me if you're done having a look.\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tkey='-THANKS-')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ]])]])]])\n\n\tmoreLayout = [[more_col_2], [more_col_3]]\n\n\tmore_window = sg.Window(\"LLMSDID - More\",\n\t\t\t\t\t\t\t layout=moreLayout,\n\t\t\t\t\t\t\t margins=(200, 100),\n\t\t\t\t\t\t\t grab_anywhere=True,\n\t\t\t\t\t\t\t default_button_element_size=(12, 1)\n\t\t\t\t\t\t\t )\n\n\twhile True:\n\t\tmore_event, more_value = more_window.read()\n\n\t\t# If the user closes the window, exit this loop so that the program can close\n\t\tif more_event == sg.WIN_CLOSED or more_event == '-THANKS-':\n\t\t\tmore_window.close()\n\t\t\tbreak\n\ndef create_downtime_window(database):\n\t\tdowntime_layout = [\n\t\t[sg.Text(\"Voyage\"), sg.In(size=(40, 40), key='-VOYAGE-')],\n\t\t[sg.Text(\"System Stop Time\"), sg.In(size=(40, 40), key='-STOP-')],\n\t\t[sg.Text(\"System Restart Time\", tooltip = \"dd-mm-yy hh:mm:ss\"), sg.In(size=(40, 40), key='-START-')],\n\t\t[sg.Text(\"Reason for Downtime\", tooltip = \"dd-mm-yy hh:mm:ss\"), sg.In(size=(25, 1), key='-REASON-')],\n\t\t[sg.Text(\"Assosciated Error\"), 
sg.In(size=(25, 1), key='-ASSOSCIATED ERROR-')],\n\t\t[sg.Button(\"Save\", enable_events=True, key='-LOG SAVE-'),\n\t\t sg.Button(\"Cancel\", enable_events=True, key='-LOG CANCEL-')]\n\t]\n\n\tdowntime_window = sg.Window(\"LLMSDID - Log some downtime\",\n\t\t\t\t\t\t layout=downtime_layout,\n\t\t\t\t\t\t margins=(200, 100),\n\t\t\t\t\t\t grab_anywhere=True,\n\t\t\t\t\t\t default_button_element_size=(12, 1)\n\t\t\t\t\t\t )\n\twhile True:\n\t\tdowntime_event, downtime_values = downtime_window.read()\n\n\t\tif downtime_event == '-LOG SAVE-':\n\t\t\tdatabase.save_to_downtime(downtime_values['-VOYAGE-'], downtime_values['-STOP-'], downtime_values['-START-'], downtime_values['-REASON-'], downtime_values['-ASSOSCIATED ERROR-'])\n\t\t\tdowntime_window.close()\n\t\t\tbreak\n\n\t\t# If the user closes the window, exit this loop so that the program can close\n\t\tif downtime_event == sg.WIN_CLOSED or downtime_event == '-LOG CANCEL-':\n\t\t\tdowntime_window.close()\n\t\t\tbreak\n\n# Main window layout\nmain_column_1 = sg.Column([[sg.Frame('Advanced search', [[sg.Column([[sg.Text(\"Voyage: \", tooltip = \"Let me know which voyage you'd like to see the errors for.\"), sg.In(size = (15, 1), pad = ((34, 0), (0, 0)), key = '-VOYAGE SEARCH-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"Status: \", tooltip = \"Would you like to look at errors we've already solved? 
Let me know here!\"), sg.In(size = (15, 1), pad = ((40, 0), (0, 0)), key = '-STATUS SEARCH-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"Fault type: \", tooltip = \"Here you can let me know what type of fault you'd like to search for.\"), sg.In(size = (15, 1), pad = ((20, 0), (0, 0)), right_click_menu = (\"Cable\", \"Hardware\", \"Sensor\", \"Connector\"), key = '-TYPE SEARCH-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"Fault location: \", tooltip = \"If you suspect that your fault might be location-specific, say so here to see previous errors that have occurred in that location.\"), sg.In(size = (15, 1), pad = ((0, 0), (0, 0)), key = '-LOCATION SEARCH-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"Sensor ID: \", tooltip = \"Think that your error could be sensor-specific? Find previous issues with your exact sensor by entering it's asset number here.\"), sg.In(size = (15, 1), pad = ((21, 0), (0, 0)), key = '-SENSOR ID SEARCH-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"Sensor type: \", tooltip = \"Search for previous errors that have been encountered with your specific type of sensor.\"), sg.In(size = (15, 1), pad = ((8, 0), (0, 0)), key = '-SENSOR TYPE SEARCH-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"From: \", tooltip = \"Enter the start date for your search.\"), sg.In(size = (15, 1), tooltip = \"dd-mm-yy hh:mm:ss\", pad = ((48, 0), (0, 0)), key = '-FROM SEARCH-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Text(\"To: \", tooltip = \"Enter the end date for your search.\"), sg.In(size = (15, 1), tooltip = \"dd-mm-yy hh:mm:ss\", pad = ((64, 0), (0, 0)), key = '-TO SEARCH-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[sg.Button(\"Search errors\", size = (12, 1), pad = ((93, 0), (7, 0)), enable_events=True, tooltip = \"Press me if you'd like to search for specific error characteristics.\",key = '-SEARCH ERROR-')]], pad = (3, 3))]])]])\n\n\nmain_column_2 = sg.Column([[sg.Frame('Faults:', [[sg.Column([[sg.Listbox(unresolved_errors, enable_events = True, size=(20, 
len(unresolved_errors)), key = '-ERROR LIST-')]]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsg.Column([[sg.Text(\"Error ID: \", size=(14,1)), sg.Text(\"\", size=(20,1), key='-OUT ID-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Text(\"Error Description: \", size=(14,15)), sg.Multiline(\"\", size=(20,15), key='-OUT DESC-')],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ]) ],\n\t\t\t\t\t\t\t\t\t\t\t\t\t [sg.Button(\"Update\", enable_events = True, tooltip = \"Press me if you'd like to update some of the information about the selected error.\", key = '-UPDATE ERROR-'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsg.Button(\"Give me more!\", enable_events = True, tooltip = \"Press me if you'd like to view all the information about this specific error.\", key = '-SHOW ME MORE-'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsg.Button(\"Show me unresolved errors\", enable_events = True, tooltip=\"Press me if you'd like to see all the unresolved errors\", key = '-UNRESOLVED-')]], pad=(0, 0))]])\n\nmain_column_3 = sg.Column([[sg.Frame('Actions', [[sg.Column([[sg.Button(\"Log a new error\", enable_events=True, tooltip = \"Press me if you'd like to log a new error.\", key = '-LOG ERROR-'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t sg.Button(\"Log some downtime\", enable_events=True, tooltip=\"Press me if you'd like to log system downtime as a result of a logged error.\", key='-LOG DOWNTIME-')]])]])]])\n\nmain_layout = [[main_column_1, main_column_2], [main_column_3]]\n\nmain_window = sg.Window(\"LLMSDID - Home\",\n\t\t\t\t\t layout = main_layout,\n\t\t\t\t\t margins = (200, 100),\n\t\t\t\t\t grab_anywhere=True,\n\t\t\t\t\t default_button_element_size=(12, 1))\n\n\nif __name__ == \"__main__\":\n\n\tdb_object = create_login_window()\n\n\twhile True:\n\t\tevent, values = main_window.read()\n\n\t\tif event == '-UNRESOLVED-':\n\t\t\tupdate_query = \"SELECT FaultID, FaultDescription FROM errors WHERE FaultStatus = 'Unresolved'\"\n\t\t\tunresolved_errors = db_object.fetch(update_query)\n\t\t\tmain_window['-ERROR 
LIST-'].update(unresolved_errors)\n\t\t\tmain_window.refresh()\n\t\t\n\t\tif values['-ERROR LIST-']:\n\t\t\tselected_error = values['-ERROR LIST-'][0]\n\t\t\terror_sel_flag = True\n\t\t\tfetch_query = \"SELECT * FROM errors WHERE FaultId = \" + str(selected_error[0])\n\t\t\tcurrent_error_list = db_object.fetch(fetch_query)\n\t\t\t\n\t\t\tcurrent_error['fault_id'] = current_error_list[0][0]\n\t\t\tcurrent_error['fault_status'] = current_error_list[0][1]\n\t\t\tcurrent_error['fault_description'] = current_error_list[0][2]\n\t\t\tcurrent_error['voyage'] = current_error_list[0][3]\n\t\t\tcurrent_error['time_of_fault'] = current_error_list[0][4]\n\t\t\tcurrent_error['time_of_solution'] = current_error_list[0][5]\n\t\t\tcurrent_error['fault_type'] = current_error_list[0][6]\n\t\t\tcurrent_error['location'] = current_error_list[0][7]\n\t\t\tcurrent_error['sensor_id'] = current_error_list[0][8]\n\t\t\tcurrent_error['sensor_type'] = current_error_list[0][9]\n\t\t\tcurrent_error['fault_message'] = current_error_list[0][10]\n\t\t\tcurrent_error['log_date'] = current_error_list[0][11]\n\n\t\t\tmain_window['-OUT ID-'].update(current_error['fault_id'])\n\t\t\tmain_window['-OUT DESC-'].update(current_error['fault_description'])\n\n\t\tif event == '-UPDATE ERROR-':\n\t\t\tif guest_user_flag:\n\t\t\t\tprint(\"User does not have privileges to update issues\")\n\t\t\telse:\n\t\t\t\tif error_sel_flag:\n\t\t\t\t\tcreate_update_window(current_error, db_object) # MEEP: point to db_object?\n\t\t\t\telse:\n\t\t\t\t\tmain_window['-OUT ID-'].update(\"Please select a fault for us to update.\")\n\t\t\t\t\tprint(\"No fault selected\")\n\t\t\n\t\tif event == '-LOG ERROR-':\n\t\t\tif guest_user_flag:\n\t\t\t\tprint(\"User does not have privileges to log an error\")\n\t\t\telse:\n\t\t\t\tcreate_log_window(db_object)\n\t\t\t\t# TODO Set current issue as logged issue if it is unresolved\n\n\t\tif event == '-SEARCH ERROR-':\n\t\t\tunresolved_errors = db_object.search(values['-VOYAGE SEARCH-'], 
values['-STATUS SEARCH-'], values['-TYPE SEARCH-'], values['-LOCATION SEARCH-'], values['-SENSOR ID SEARCH-'], values['-SENSOR TYPE SEARCH-'], values['-FROM SEARCH-'], values['-TO SEARCH-'])\n\t\t\tmain_window['-ERROR LIST-'].update(unresolved_errors)\n\t\t\tmain_window.refresh()\n\n\t\tif event == '-SHOW ME MORE-':\n\t\t\tif error_sel_flag:\n\t\t\t\tcreate_more_window(current_error, db_object)\n\t\t\telse:\n\t\t\t\tmain_window['-OUT ID-'].update(\"Please select a fault for us to have a look at.\")\n\t\t\t\tprint(\"No fault selected\")\n\n\t\tif event == '-LOG DOWNTIME-':\n\t\t\tif(guest_user_flag):\n\t\t\t\tprint(\"User does not have privileges to log downtime\")\n\t\t\telse:\n\t\t\t\tcreate_downtime_window(db_object)\n\n\t\tif event == sg.WIN_CLOSED:\n\t\t\tbreak",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Read an upper bound from the user, then print the integers 1..n,
# one per line (prints nothing when n < 1).
n = int(input("nhap gia tri"))
i = 1
while i <= n:
    print(i)
    i += 1
|
normal
|
{
"blob_id": "21b295e28a7e4443ea116df1b22ff5074dca955a",
"index": 246,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1):\n print(i)\n",
"step-3": "n = int(input('nhap gia tri'))\nfor i in range(1, n + 1):\n print(i)\n",
"step-4": "n =int(input(\"nhap gia tri\"))\nfor i in range(1,n+1):\n\n\n\n\n\n\n\n\n\n\n\n\n print(i)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import time
import uuid
import subprocess
# Global configuration. ADJUST THESE TO YOUR NEEDS before running.
chia_executable = os.path.expanduser('~')+"/chia-blockchain/venv/bin/chia" # path to the chia CLI binary inside its virtualenv
numberOfLogicalCores = 16 # total logical cores this plotter is allowed to consume overall
run_loop_interval = 10 # seconds between passes of the main polling loop
refresh_logs_interval = 10 # seconds to wait before re-reading all logs after launching a plot
logs_location = os.path.expanduser('~')+"/.chia/mainnet/plotter/" # directory holding the plot log files. Remove all corrupted and interrupted log files!
string_contained_in_all_logs = ".txt" # substring shared by every log filename (all logfiles must have it!)
phase_one_finished = "Time for phase 1 =" # log marker: phase 1 done, 2 of the plot's 4 assumed cores can be freed
phase_four_finished = "Time for phase 4 =" # log marker: phase 4 done, the remaining 2 cores can be freed
temporary_directory = "/srv/chia/plots/" # temporary/working plot directory. NOTE(review): the original comments here had temp/final swapped
final_directory = "/mnt/chia/plots/" # final plot destination directory
farmer_public_key = "8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2" # change to your key
pool_public_key = "907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1" # change to your key
# Functions
def fetch_file_content(file_path):
    """Return the lines of *file_path* as a list, or [] if the file is missing.

    The original implicitly returned None when the file did not exist, which
    would crash count_used_cores() when it iterated the log contents. An
    empty list is the safe, backward-compatible stand-in; the warning is
    still printed so the operator notices the missing file.
    """
    if not os.path.isfile(file_path):
        print('File does not exist.')
        return []
    with open(file_path) as file:
        return file.readlines()
def fetch_logs():
    """Read every log file under logs_location and return a list of their contents.

    Only entries whose name contains string_contained_in_all_logs are read;
    each element of the result is whatever fetch_file_content returns for
    that file (a list of lines).
    """
    entries = os.listdir(logs_location)
    candidate_paths = [logs_location + entry for entry in entries]
    log_paths = [path for path in candidate_paths
                 if string_contained_in_all_logs in path]
    return [fetch_file_content(path) for path in log_paths]
def count_used_cores(logs):
    """Estimate how many logical cores the running plots currently occupy.

    Each log represents one plot, assumed to start in phase 1 using 4 cores;
    every phase-1-finished and phase-4-finished marker found in the log
    releases 2 of them.
    """
    print("===START COUNTING===")
    busy_cores = 0
    for idx, log_lines in enumerate(logs):
        print(f"Starting log #{idx}")
        print("Potentially it's still in phase one assigning 4 cores")
        busy_cores += 4
        for entry in log_lines:
            if phase_one_finished in entry:
                print("Phase one was finished in the log, deallocating two cores")
                busy_cores -= 2
            if phase_four_finished in entry:
                print("Phase four was finished in the log, deallocating two cores")
                busy_cores -= 2
    print(f"===FINISH COUNTING: {busy_cores} USED CORES===")
    return busy_cores
def use_all_cores():
    """Keep launching new plots while the core budget has headroom.

    The logs are re-read (after a short delay) following every launch so the
    count reflects the newly started plot. NOTE(review): the guard only
    requires two spare cores even though the message mentions four — confirm
    the intended headroom.
    """
    while True:
        cores_used = count_used_cores(fetch_logs())
        if numberOfLogicalCores <= cores_used + 1:
            break
        print("There are four cores free, adding new plot!")
        add_plot()
        time.sleep(refresh_logs_interval)
def add_plot():
    """Launch one detached chia plotting process, logging its output to a new file.

    Builds the command from the module-level configuration
    (temporary_directory, final_directory, farmer_public_key,
    pool_public_key) instead of the previously hard-coded paths, so editing
    the settings at the top of the file actually changes the plot command.
    Also drops the stray '//' the old code put in the log path
    (logs_location already ends with '/').
    """
    command = (
        f"{chia_executable} plots create -k 32 -b 3724 -n 1 -r4"
        f" -t {temporary_directory} -2 {temporary_directory}"
        f" -d {final_directory}"
        f" -f {farmer_public_key} -p {pool_public_key} &"
    )
    unique_filename = str(uuid.uuid4())
    new_log_file_path = f"{logs_location}{unique_filename}{string_contained_in_all_logs}"
    with open(new_log_file_path, "w") as file:
        # shell=True is required for the trailing '&' to detach the process;
        # every interpolated value comes from module config, not user input.
        subprocess.run(command, shell=True, stdout=file)
def run_loop():
    """Poll forever: top up plotting jobs, then pause run_loop_interval seconds."""
    while True:
        use_all_cores()
        time.sleep(run_loop_interval)
# Entry point — only start the infinite polling loop when executed as a
# script, so importing this module for its helpers does not block forever.
if __name__ == "__main__":
    run_loop()
|
normal
|
{
"blob_id": "bc536440a8982d2d4a1bc5809c0d9bab5ac6553a",
"index": 2313,
"step-1": "<mask token>\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log,\n item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in\n path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print('===START COUNTING===')\n used_cores_counter = 0\n for index, log in enumerate(logs):\n print(f'Starting log #{index}')\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\n 'Phase one was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\n 'Phase four was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n print(f'===FINISH COUNTING: {used_cores_counter} USED CORES===')\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores > cores_used + 1:\n print('There are four cores free, adding new plot!')\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\n<mask token>\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fetch_file_content(file_path):\n if not os.path.isfile(file_path):\n print('File does not exist.')\n else:\n with open(file_path) as file:\n return file.readlines()\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log,\n item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in\n path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print('===START COUNTING===')\n used_cores_counter = 0\n for index, log in enumerate(logs):\n print(f'Starting log #{index}')\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\n 'Phase one was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\n 'Phase four was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n print(f'===FINISH COUNTING: {used_cores_counter} USED CORES===')\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores > cores_used + 1:\n print('There are four cores free, adding new plot!')\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\ndef add_plot():\n command = (\n f'{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &'\n )\n unique_filename = str(uuid.uuid4())\n new_log_file_path = (\n f'{logs_location}/{unique_filename}{string_contained_in_all_logs}')\n with open(new_log_file_path, 'w') as file:\n subprocess.run(command, shell=True, stdout=file)\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\n<mask token>\n",
"step-3": "<mask token>\nchia_executable = os.path.expanduser('~') + '/chia-blockchain/venv/bin/chia'\nnumberOfLogicalCores = 16\nrun_loop_interval = 10\nrefresh_logs_interval = 10\nlogs_location = os.path.expanduser('~') + '/.chia/mainnet/plotter/'\nstring_contained_in_all_logs = '.txt'\nphase_one_finished = 'Time for phase 1 ='\nphase_four_finished = 'Time for phase 4 ='\ntemporary_directory = '/srv/chia/plots/'\nfinal_directory = '/mnt/chia/plots/'\nfarmer_public_key = (\n '8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2'\n )\npool_public_key = (\n '907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1'\n )\n\n\ndef fetch_file_content(file_path):\n if not os.path.isfile(file_path):\n print('File does not exist.')\n else:\n with open(file_path) as file:\n return file.readlines()\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log,\n item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in\n path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print('===START COUNTING===')\n used_cores_counter = 0\n for index, log in enumerate(logs):\n print(f'Starting log #{index}')\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\n 'Phase one was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\n 'Phase four was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n print(f'===FINISH COUNTING: {used_cores_counter} USED CORES===')\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores 
> cores_used + 1:\n print('There are four cores free, adding new plot!')\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\ndef add_plot():\n command = (\n f'{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &'\n )\n unique_filename = str(uuid.uuid4())\n new_log_file_path = (\n f'{logs_location}/{unique_filename}{string_contained_in_all_logs}')\n with open(new_log_file_path, 'w') as file:\n subprocess.run(command, shell=True, stdout=file)\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\nrun_loop()\n",
"step-4": "import os\nimport time\nimport uuid\nimport subprocess\nchia_executable = os.path.expanduser('~') + '/chia-blockchain/venv/bin/chia'\nnumberOfLogicalCores = 16\nrun_loop_interval = 10\nrefresh_logs_interval = 10\nlogs_location = os.path.expanduser('~') + '/.chia/mainnet/plotter/'\nstring_contained_in_all_logs = '.txt'\nphase_one_finished = 'Time for phase 1 ='\nphase_four_finished = 'Time for phase 4 ='\ntemporary_directory = '/srv/chia/plots/'\nfinal_directory = '/mnt/chia/plots/'\nfarmer_public_key = (\n '8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2'\n )\npool_public_key = (\n '907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1'\n )\n\n\ndef fetch_file_content(file_path):\n if not os.path.isfile(file_path):\n print('File does not exist.')\n else:\n with open(file_path) as file:\n return file.readlines()\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log,\n item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in\n path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print('===START COUNTING===')\n used_cores_counter = 0\n for index, log in enumerate(logs):\n print(f'Starting log #{index}')\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\n 'Phase one was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\n 'Phase four was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n print(f'===FINISH COUNTING: {used_cores_counter} USED CORES===')\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = 
count_used_cores(log_list)\n while numberOfLogicalCores > cores_used + 1:\n print('There are four cores free, adding new plot!')\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\ndef add_plot():\n command = (\n f'{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &'\n )\n unique_filename = str(uuid.uuid4())\n new_log_file_path = (\n f'{logs_location}/{unique_filename}{string_contained_in_all_logs}')\n with open(new_log_file_path, 'w') as file:\n subprocess.run(command, shell=True, stdout=file)\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\nrun_loop()\n",
"step-5": "import os\nimport time\nimport uuid\nimport subprocess\n\n# Global variables. ADJUST THEM TO YOUR NEEDS\nchia_executable = os.path.expanduser('~')+\"/chia-blockchain/venv/bin/chia\" # directory of chia binary file\nnumberOfLogicalCores = 16 # number of logical cores that you want to use overall\nrun_loop_interval = 10 # seconds of delay before this algorithm executes another loop\nrefresh_logs_interval = 10 # seconds of delay before this algorithm will try to re-read all logs after adding plot\nlogs_location = os.path.expanduser('~')+\"/.chia/mainnet/plotter/\" # location of the log files. Remove all corrupted and interrupted log files!\nstring_contained_in_all_logs = \".txt\" # shared part of the name of all the log files (all logfiles must have it!)\nphase_one_finished = \"Time for phase 1 =\" # part of the log file that means 1/2 core should be freed\nphase_four_finished = \"Time for phase 4 =\" # part of the log file that means 2/2 core should be freed\ntemporary_directory = \"/srv/chia/plots/\" # plotting final destination\nfinal_directory = \"/mnt/chia/plots/\" # plotting directory\nfarmer_public_key = \"8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2\" # change to your key\npool_public_key = \"907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1\" # change to your key\n\n\n# Functions\ndef fetch_file_content(file_path):\n if not os.path.isfile(file_path):\n print('File does not exist.')\n else:\n with open(file_path) as file:\n return file.readlines()\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log, item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print(\"===START 
COUNTING===\")\n used_cores_counter = 0\n for (index, log) in enumerate(logs):\n print(f\"Starting log #{index}\")\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\"Phase one was finished in the log, deallocating two cores\")\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\"Phase four was finished in the log, deallocating two cores\")\n used_cores_counter -= 2\n print(f\"===FINISH COUNTING: {used_cores_counter} USED CORES===\")\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores > cores_used +1:\n print(\"There are four cores free, adding new plot!\")\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\ndef add_plot():\n command = f\"{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &\"\n unique_filename = str(uuid.uuid4())\n new_log_file_path = f\"{logs_location}/{unique_filename}{string_contained_in_all_logs}\"\n with open(new_log_file_path, \"w\") as file:\n subprocess.run(command, shell=True, stdout=file)\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\n# Entry point\nrun_loop()\n",
"step-ids": [
4,
6,
8,
9,
10
]
}
|
[
4,
6,
8,
9,
10
] |
from helper.logger_helper import Log
from helper.mail_helper import MailHelper
import spider.spider as spider
from configuration.configuration_handler import Configuration
from configuration.products_handler import ProductsHandler
if __name__ == "__main__":
    # Wire up logging, configuration and the product store.
    log = Log()
    config = Configuration('configuration/configuration.yaml').load_configuration()
    products_handler = ProductsHandler(config["products_path"])
    log.info("Configuration loaded")
    tracked_products = products_handler.load_products()
    log.info("Products loaded from {}".format(config["products_path"]))
    # Crawl every tracked product; `changed` lists the price drops and
    # `report` is the ready-made mail body.
    changed, report = spider.Spider(tracked_products, config).crawl()
    if changed:
        log.info("Products to report")
        mailer = MailHelper()
        mailer.send_mail('', report, "New prices lower")
        log.info("Mail sent")
        mailer.close_connection()
    else:
        log.info("Nothing to report")
    # Persist any product state updated during the crawl.
    products_handler.save_products(tracked_products)
    log.info("Configuration saved")
else:
    print("Exec this file as the main entrypoint! -> python3 init.py")
|
normal
|
{
"blob_id": "2e140d1174e0b2d8a97df880b1bffdf84dc0d236",
"index": 1029,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n logger = Log()\n conf = Configuration('configuration/configuration.yaml'\n ).load_configuration()\n ph = ProductsHandler(conf['products_path'])\n logger.info('Configuration loaded')\n products = ph.load_products()\n logger.info('Products loaded from {}'.format(conf['products_path']))\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info('Products to report')\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, 'New prices lower')\n logger.info('Mail sent')\n mail_helper.close_connection()\n else:\n logger.info('Nothing to report')\n ph.save_products(products)\n logger.info('Configuration saved')\nelse:\n print('Exec this file as the main entrypoint! -> python3 init.py')\n",
"step-3": "from helper.logger_helper import Log\nfrom helper.mail_helper import MailHelper\nimport spider.spider as spider\nfrom configuration.configuration_handler import Configuration\nfrom configuration.products_handler import ProductsHandler\nif __name__ == '__main__':\n logger = Log()\n conf = Configuration('configuration/configuration.yaml'\n ).load_configuration()\n ph = ProductsHandler(conf['products_path'])\n logger.info('Configuration loaded')\n products = ph.load_products()\n logger.info('Products loaded from {}'.format(conf['products_path']))\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info('Products to report')\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, 'New prices lower')\n logger.info('Mail sent')\n mail_helper.close_connection()\n else:\n logger.info('Nothing to report')\n ph.save_products(products)\n logger.info('Configuration saved')\nelse:\n print('Exec this file as the main entrypoint! -> python3 init.py')\n",
"step-4": "from helper.logger_helper import Log\nfrom helper.mail_helper import MailHelper\nimport spider.spider as spider\nfrom configuration.configuration_handler import Configuration\nfrom configuration.products_handler import ProductsHandler\n\nif __name__ == \"__main__\":\n logger = Log()\n conf = Configuration('configuration/configuration.yaml').load_configuration()\n ph = ProductsHandler(conf[\"products_path\"]) \n logger.info(\"Configuration loaded\")\n products = ph.load_products()\n logger.info(\"Products loaded from {}\".format(conf[\"products_path\"]))\n\n update, msg = spider.Spider(products, conf).crawl()\n if len(update) > 0:\n logger.info(\"Products to report\")\n mail_helper = MailHelper()\n mail_helper.send_mail('', msg, \"New prices lower\")\n \n logger.info(\"Mail sent\")\n mail_helper.close_connection()\n\n else:\n logger.info(\"Nothing to report\")\n \n ph.save_products(products)\n logger.info(\"Configuration saved\")\nelse:\n print(\"Exec this file as the main entrypoint! -> python3 init.py\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
s = input()
# Presumably: print the lexicographically smallest string of distinct
# lowercase letters strictly greater than s, or -1 if none exists.
if len(s) < 26:
    # Not every letter is used yet: appending the smallest unused letter
    # gives the smallest strictly greater candidate.
    for i in range(26):
        c = chr(ord("a")+i)
        if c not in s:
            print(s+c)
            exit()
else:
    # s uses all 26 letters, so no longer string is possible; scan from the
    # right for a position whose letter can be bumped up.
    for i in reversed(range(1,26)):
        if s[i-1] < s[i]:
            s1 = s[0:i-1]
            # Replace s[i-1] with the smallest larger letter not already in
            # the kept prefix, dropping everything after it.
            for j in range(26):
                c = chr(ord("a")+j)
                if c > s[i-1] and c not in s1:
                    print(s1+c)
                    exit()
    print(-1)
|
normal
|
{
"blob_id": "9931fc25118981bcce80cffd3fda9dc99d951bf5",
"index": 180,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(s) < 26:\n for i in range(26):\n c = chr(ord('a') + i)\n if c not in s:\n print(s + c)\n exit()\nelse:\n for i in reversed(range(1, 26)):\n if s[i - 1] < s[i]:\n s1 = s[0:i - 1]\n for j in range(26):\n c = chr(ord('a') + j)\n if c > s[i - 1] and c not in s1:\n print(s1 + c)\n exit()\n print(-1)\n",
"step-3": "s = input()\nif len(s) < 26:\n for i in range(26):\n c = chr(ord('a') + i)\n if c not in s:\n print(s + c)\n exit()\nelse:\n for i in reversed(range(1, 26)):\n if s[i - 1] < s[i]:\n s1 = s[0:i - 1]\n for j in range(26):\n c = chr(ord('a') + j)\n if c > s[i - 1] and c not in s1:\n print(s1 + c)\n exit()\n print(-1)\n",
"step-4": "s = input()\nif len(s) < 26:\n for i in range(26):\n c = chr(ord(\"a\")+i)\n if c not in s:\n print(s+c)\n exit()\nelse:\n for i in reversed(range(1,26)):\n if s[i-1] < s[i]:\n s1 = s[0:i-1]\n for j in range(26):\n c = chr(ord(\"a\")+j)\n if c > s[i-1] and c not in s1:\n print(s1+c)\n exit()\n print(-1)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Given a 2D binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.
Example:
Input:
1 0 1 0 0
1 0 1 1 1
1 1 1 1 1
1 0 0 1 0
Output: 4
"""
# 196ms. 98 percentile
class Solution:
    """Maximal Square: area of the largest all-'1' square in a binary matrix."""

    def maximalSquare(self, matrix: list[list[str]]) -> int:
        """Return the area of the largest square containing only '1's.

        Rolling 1-D DP: dp[j] is the side length of the largest square whose
        bottom-right corner sits at column j of the current row.
        O(rows*cols) time, O(cols) space.

        Note: the original annotated with ``List`` without importing it from
        ``typing``, which raises NameError at definition time; builtin
        generics (PEP 585) fix that.
        """
        if not matrix:
            return 0
        # One extra slot so dp[j-1] reads a harmless 0 when j == 0.
        dp = [0] * (len(matrix[0]) + 1)
        longest_side = 0
        for i in range(len(matrix)):
            prev_diag = 0  # dp of the upper-left neighbour (previous row, col j-1)
            for j in range(len(matrix[0])):
                temp = dp[j]  # save before overwriting: becomes prev_diag for j+1
                if matrix[i][j] == '1':
                    dp[j] = 1 + min(dp[j], dp[j - 1], prev_diag)
                    longest_side = max(longest_side, dp[j])
                else:
                    dp[j] = 0
                prev_diag = temp
        return longest_side * longest_side
"""
Notes:
Two hard things in this problem. The first is the logic for the dp, although after the fact
it seems pretty straightforward imo.
At any element you can check if you have a 2 by 2 square by looking at its neighbors. So anywhere you see
1 1
1 1
you're going to replace the bottom right corner with a 2. Note we're going top down and left to right...
So if you see
2 2
2 1
...then you know that you actually have
1 1 1
1 2 2
1 2 1
meaning you can actually put 3 in the corner.
On the other hand, if any of the neighbors are 1's, then you won't have the full cube. Implying that
at each spot, if it's a 1, you take the min of the three neighbors + 1.
The second hard thing is just dealing with the fact the input is characters not ints...annoying imo. The second
solution up there just uses a standard 1d dp array to keep track of the last row processed in terms of ints...which
is all we need. So we can avoid casting anything.
The first solution only casts the first first row and the first column.
Most of it is straightforwards. The one thing I want to note is that temp variable switch. Basically because our dp is
a single array, when we're processing element i, we've already replaced element i-1 with an updated value. That's a problem
because the i-1 value represents the j-1,i-1 value for the ith element in the dp. So we use that little temp switch to
skirt the issue.
"""
|
normal
|
{
"blob_id": "e5d31a2ea4a8615d24626be2414f5ae49b9cd6a1",
"index": 184,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def maximalSquare(self, matrix: List[List[str]]) ->int:\n if not matrix:\n return 0\n dp = [0] * (len(matrix[0]) + 1)\n longestSide = 0\n for i in range(len(matrix)):\n prevSquare = 0\n for j in range(len(matrix[0])):\n temp = dp[j]\n if matrix[i][j] == '1':\n dp[j] = 1 + min(dp[j], dp[j - 1], prevSquare)\n longestSide = max(longestSide, dp[j])\n else:\n dp[j] = 0\n prevSquare = temp\n return longestSide * longestSide\n\n\n<mask token>\n",
"step-4": "\"\"\"\nGiven a 2D binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.\n\nExample:\n\nInput: \n\n1 0 1 0 0\n1 0 1 1 1\n1 1 1 1 1\n1 0 0 1 0\n\nOutput: 4\n\"\"\"\n# 196ms. 98 percentile\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n if not matrix:\n return 0\n dp = [0]*(len(matrix[0]) + 1)\n longestSide = 0\n for i in range(len(matrix)):\n prevSquare =0\n for j in range(len(matrix[0])):\n temp = dp[j]\n if matrix[i][j] == '1':\n dp[j] = 1 + min(dp[j], dp[j-1], prevSquare)\n longestSide = max(longestSide, dp[j])\n else:\n dp[j] = 0\n \n prevSquare = temp\n \n return longestSide*longestSide\n\n\n\"\"\"\nNotes:\n\nTwo hard things in this problem. The first is the logic for the dp, although after the fact \nit seems pretty straightforward imo.\nAt any element you can check if you have a 2 by 2 square by looking at its neighbors. So anywhere you see\n1 1\n1 1\nyou're going to replace the bottom right corner with a 2. Note we're going top down and left to right...\nSo if you see\n2 2\n2 1 \n...then you know that you actually have\n1 1 1\n1 2 2\n1 2 1\nmeaning you can actually put 3 in the corner. \n\nOn the other hand, if any of the neighbors are 1's, then you won't have the full cube. Implying that\nat each spot, if it's a 1, you take the min of the three neighbors + 1. \n\n\nThe second hard thing is just dealing with the fact the input is characters not ints...annoying imo. The second\nsolution up there just uses a standard 1d dp array to keep track of the last row processed in terms of ints...which\nis all we need. So we can avoid casting anything. \n\nThe first solution only casts the first first row and the first column. \n\nMost of it is straightforwards. The one thing I want to note is that temp variable switch. Basically because our dp is\na single array, when we're processing element i, we've already replaced element i-1 with an updated value. 
That's a problem\nbecause the i-1 value represents the j-1,i-1 value for the ith element in the dp. So we use that little temp switch to \nskirt the issue. \n\"\"\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import cv2
import numpy as np
# Dump every decodable frame of the stream to numbered PNG files.
cap = cv2.VideoCapture("./vStream.h264")
count = 0
while True:
    ret, frame = cap.read()
    if ret:
        print("Decoded frame")
        cv2.imwrite("fr_" + str(count) + ".png", frame)
        count += 1
    else:
        # End of stream or decode failure. The original looped forever here,
        # printing the failure message endlessly; stop instead.
        print("Couldn't decode frame")
        break
# Release the capture handle (the original leaked it).
cap.release()
|
normal
|
{
"blob_id": "40ac3292befa2354878927ada0e10c24368a9d73",
"index": 2643,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n ret, frame = cap.read()\n if ret:\n print('Decoded frame')\n cv2.imwrite('fr_' + str(count) + '.png', frame)\n count += 1\n else:\n print(\"Couldn't decoded frame\")\n",
"step-3": "<mask token>\ncap = cv2.VideoCapture('./vStream.h264')\ncount = 0\nwhile True:\n ret, frame = cap.read()\n if ret:\n print('Decoded frame')\n cv2.imwrite('fr_' + str(count) + '.png', frame)\n count += 1\n else:\n print(\"Couldn't decoded frame\")\n",
"step-4": "import cv2\nimport numpy as np\ncap = cv2.VideoCapture('./vStream.h264')\ncount = 0\nwhile True:\n ret, frame = cap.read()\n if ret:\n print('Decoded frame')\n cv2.imwrite('fr_' + str(count) + '.png', frame)\n count += 1\n else:\n print(\"Couldn't decoded frame\")\n",
"step-5": "import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(\"./vStream.h264\")\ncount = 0\n\nwhile True:\n ret, frame = cap.read()\n if ret:\n print(\"Decoded frame\")\n # cv2.imshow(\"frame\", frame)\n cv2.imwrite(\"fr_\"+str(count)+\".png\", frame)\n count += 1\n else:\n print(\"Couldn\\'t decoded frame\") \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.stats import chisquare, chi2, binom, poisson
def f_1(x, a):
    """Damped sine sin(a*x)/(x+5) — candidate pdf shape used by the ML fit below."""
    return (1 / (x + 5)) * np.sin(a * x)
def f_2(x, a):
    """Shifted sine sin(a*x) + 1 (non-negative, usable as an unnormalised pdf)."""
    return np.sin(a * x) + 1
def f_3(x, a):
    """Chirp-like sine sin(a * x**2)."""
    return np.sin(a * (x ** 2))
def f_4(x, a):
    """Squared sine sin(a*x + 1)**2 (always non-negative)."""
    return np.sin(a * x + 1) ** 2
def f_5(x):
    """Return x * tan(x) (no free parameter)."""
    return x * np.tan(x)
def f_6(x, a, b):
    """Quadratic 1 + a*x + b*x**2 divided by (2/3)*(b+3), its integral over [-1, 1]."""
    return (1 + a * x + b * (x ** 2)) / ((2/3) * (b + 3))
def f_7(x, a, b):
    """Straight line a + b*x."""
    return a + b * x
def f_8(x, a, b, c):
    """Sine plus exponential background: sin(a*x) + c*exp(b*x) + 1."""
    return np.sin(a * x) + c * np.exp(b * x) + 1
def f_9(x, a, b):
    """Unnormalised Gaussian with mean a and standard deviation b."""
    return np.exp(-(x - a) ** 2 / (2 * (b ** 2)))
def my_pdf(VAR, x):
    """Negative log-likelihood of the f_1 model with parameter VAR over data x."""
    # Evaluate the model on every data point, then sum the negative logs.
    likelihoods = f_1(x, VAR)
    return -np.sum(np.log(likelihoods))
# Load the exam data set; column 0 holds the measurements fitted below.
fname = 'Exam_2018_Prob1.txt'
data = np.loadtxt(fname)

z = data[:, 0]

# Parameter bounds for the various candidate fits (only a_bound is used below).
a_bound = (-10, 0)
b_bound = (-10, 10)
c_bound = (4000, 8000)

n_bound = (0, None)
p_bound = (0, None)

mu_bound = (0, None)

# Maximum-likelihood fit of f_1's parameter `a` (my_pdf is the negative
# log-likelihood). NOTE(review): args=(z) is NOT a one-tuple — (z) is just z;
# the intent was probably args=(z,). Confirm scipy accepts a bare array here.
data_0 = minimize(my_pdf, [1, ], args=(z), method='SLSQP',
                  bounds=(a_bound, ))


print(data_0)
# Overlay a model curve on a normalised histogram of the data.
# NOTE(review): plots f_1 with hard-coded a=-3 and a +0.2 offset, not the
# fitted value data_0.x[0] — confirm this is intentional.
x = np.arange(20, 27, 0.01)
y = f_1(x, -3)
plt.plot(x, y+0.2)
# NOTE(review): `normed` was removed in Matplotlib 3.x; newer versions need
# density=True here.
plt.hist(z, bins=200, normed=True)
plt.show()
# Bin edges over column 2, used by the commented-out chi-square check below.
binwidth = 0.1
n_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)

# Chi2 calculator
# observed_values, bins, _ = plt.hist(data[:, 2], bins=n_bins)

# plt.show()
# We normalize by multiplyting the length of the data with the binwidth
# expected_values = poisson.pmf(bins, data_0.x[0]) * len(data) 

# print(observed_values[observed_values!=0])
# print(expected_values[expected_values!=0])
# print(chisquare(observed_values[observed_values!=0], f_exp=expected_values[expected_values!=0]))
# print('Threshold value ', chi2.isf(0.05, 18))


# x = np.arange(-1, 1, 0.01)
# y = f_6(x, data_0.x[0], data_0.x[1]) 
# plt.plot(x,y)
# plt.show()
|
normal
|
{
"blob_id": "27edc753ebb9d60715a2ffa25d77e69ef363d010",
"index": 3568,
"step-1": "<mask token>\n\n\ndef f_1(x, a):\n return 1 / (x + 5) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * x ** 2)\n\n\n<mask token>\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))\n\n\n<mask token>\n\n\ndef my_pdf(VAR, x):\n a = VAR\n pdf = f_1(x, a)\n ln_pdf = np.log(pdf)\n result = np.sum(-ln_pdf)\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f_1(x, a):\n return 1 / (x + 5) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * x ** 2)\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))\n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\n<mask token>\n\n\ndef my_pdf(VAR, x):\n a = VAR\n pdf = f_1(x, a)\n ln_pdf = np.log(pdf)\n result = np.sum(-ln_pdf)\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef f_1(x, a):\n return 1 / (x + 5) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * x ** 2)\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))\n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\ndef f_9(x, a, b):\n return np.exp(-(x - a) ** 2 / (2 * b ** 2))\n\n\ndef my_pdf(VAR, x):\n a = VAR\n pdf = f_1(x, a)\n ln_pdf = np.log(pdf)\n result = np.sum(-ln_pdf)\n return result\n\n\nfname = 'Exam_2018_Prob1.txt'\ndata = np.loadtxt(fname)\nz = data[:, 0]\na_bound = -10, 0\nb_bound = -10, 10\nc_bound = 4000, 8000\nn_bound = 0, None\np_bound = 0, None\nmu_bound = 0, None\ndata_0 = minimize(my_pdf, [1], args=z, method='SLSQP', bounds=(a_bound,))\nprint(data_0)\nx = np.arange(20, 27, 0.01)\ny = f_1(x, -3)\nplt.plot(x, y + 0.2)\nplt.hist(z, bins=200, normed=True)\nplt.show()\nbinwidth = 0.1\nn_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.stats import chisquare, chi2, binom, poisson\n\n\ndef f_1(x, a):\n return 1 / (x + 5) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * x ** 2)\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))\n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\ndef f_9(x, a, b):\n return np.exp(-(x - a) ** 2 / (2 * b ** 2))\n\n\ndef my_pdf(VAR, x):\n a = VAR\n pdf = f_1(x, a)\n ln_pdf = np.log(pdf)\n result = np.sum(-ln_pdf)\n return result\n\n\nfname = 'Exam_2018_Prob1.txt'\ndata = np.loadtxt(fname)\nz = data[:, 0]\na_bound = -10, 0\nb_bound = -10, 10\nc_bound = 4000, 8000\nn_bound = 0, None\np_bound = 0, None\nmu_bound = 0, None\ndata_0 = minimize(my_pdf, [1], args=z, method='SLSQP', bounds=(a_bound,))\nprint(data_0)\nx = np.arange(20, 27, 0.01)\ny = f_1(x, -3)\nplt.plot(x, y + 0.2)\nplt.hist(z, bins=200, normed=True)\nplt.show()\nbinwidth = 0.1\nn_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.stats import chisquare, chi2, binom, poisson\n\n\ndef f_1(x, a):\n return (1 / (x + 5)) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * (x ** 2))\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * (x ** 2)) / ((2/3) * (b + 3)) \n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\ndef f_9(x, a, b):\n return np.exp(-(x - a) ** 2 / (2 * (b ** 2)))\n\ndef my_pdf(VAR, x):\n a = VAR\n\n pdf = f_1(x, a)\n\n ln_pdf = np.log((pdf))\n result = np.sum(-ln_pdf)\n return result\n\n\nfname = 'Exam_2018_Prob1.txt'\ndata = np.loadtxt(fname)\n\nz = data[:, 0]\n\na_bound = (-10, 0)\nb_bound = (-10, 10)\nc_bound = (4000, 8000)\n\nn_bound = (0, None)\np_bound = (0, None)\n\nmu_bound = (0, None)\n\ndata_0 = minimize(my_pdf, [1, ], args=(z), method='SLSQP',\n bounds=(a_bound, ))\n\n\nprint(data_0)\nx = np.arange(20, 27, 0.01)\ny = f_1(x, -3)\nplt.plot(x, y+0.2)\nplt.hist(z, bins=200, normed=True)\nplt.show()\nbinwidth = 0.1\nn_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)\n\n# Chi2 calculator\n# observed_values, bins, _ = plt.hist(data[:, 2], bins=n_bins)\n\n# plt.show()\n# We normalize by multiplyting the length of the data with the binwidth\n# expected_values = poisson.pmf(bins, data_0.x[0]) * len(data) \n\n# print(observed_values[observed_values!=0])\n# print(expected_values[expected_values!=0])\n# print(chisquare(observed_values[observed_values!=0], f_exp=expected_values[expected_values!=0]))\n# print('Threshold value ', chi2.isf(0.05, 18))\n\n\n# x = np.arange(-1, 1, 0.01)\n# y = f_6(x, data_0.x[0], data_0.x[1]) \n# plt.plot(x,y)\n# plt.show()\n\n",
"step-ids": [
6,
9,
12,
13,
14
]
}
|
[
6,
9,
12,
13,
14
] |
# Read N candidates and a time limit T; report the cheapest candidate whose
# running time fits within T, or "TLE" if none qualifies.
N, T = map(int, input().split())
best_cost = 1000000  # sentinel: larger than any admissible cost
for _ in range(N):
    cost, duration = map(int, input().split())
    if duration <= T and cost < best_cost:
        best_cost = cost
print("TLE" if best_cost == 1000000 else best_cost)
|
normal
|
{
"blob_id": "8a0e781f29c426161240e33b9d2adc7537b3d352",
"index": 2513,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(N):\n c, t = map(int, input().split())\n if nm > c and T >= t:\n nm = c\nif nm == 1000000:\n print('TLE')\nelse:\n print(nm)\n",
"step-3": "N, T = map(int, input().split())\nnm = 1000000\nfor i in range(N):\n c, t = map(int, input().split())\n if nm > c and T >= t:\n nm = c\nif nm == 1000000:\n print('TLE')\nelse:\n print(nm)\n",
"step-4": "N,T=map(int,input().split())\nnm=1000000\nfor i in range(N):\n c,t=map(int,input().split())\n if nm>c and T>=t:\n nm=c\nif nm==1000000:\n print(\"TLE\")\nelse:\n print(nm)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#implement variable!
import numpy as np
class Variable:
    """Thin box that carries a data payload through the computation chain."""

    def __init__(self, data):
        # Store the raw value (e.g. a NumPy scalar/array) unchanged.
        self.data = data
class Function:
    """Base class; concrete functions implement `foward` in subclasses.

    __call__ takes and returns Variable instances so functions compose.
    """

    def __call__(self, input):
        # Unbox, run the concrete forward computation, re-box the result.
        return Variable(self.foward(input.data))

    def foward(self, x):
        raise NotImplementedError()
class Square(Function):
    """Elementwise square: y = x * x."""

    def foward(self, x):
        return x * x
class Exp(Function):
    """Elementwise exponential: y = exp(x) (delegates to NumPy)."""
    def foward(self, x):
        return np.exp(x)
# Function.__call__ is unified on Variable instances, so concrete functions
# compose directly.
square = Square()
exp = Exp()

# Composite chain: x -> [Square] -> [Exp] -> [Square] -> y
x = Variable(np.array(0.5))
y = square(exp(square(x)))
print(y.data)
|
normal
|
{
"blob_id": "9efd83524ebb598f30c8fb6c0f9f0c65333578e6",
"index": 6292,
"step-1": "<mask token>\n\n\nclass Function:\n <mask token>\n <mask token>\n\n def foward(self, x):\n raise NotImplementedError()\n\n\nclass Square(Function):\n\n def foward(self, x):\n return x ** 2\n\n\nclass Exp(Function):\n\n def foward(self, x):\n return np.exp(x)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Variable:\n\n def __init__(self, data):\n self.data = data\n\n\nclass Function:\n \"\"\"\n Base class\n specific functions are implemented in the inherited class\n \"\"\"\n\n def __call__(self, input):\n x = input.data\n y = self.foward(x)\n output = Variable(y)\n return output\n\n def foward(self, x):\n raise NotImplementedError()\n\n\nclass Square(Function):\n\n def foward(self, x):\n return x ** 2\n\n\nclass Exp(Function):\n\n def foward(self, x):\n return np.exp(x)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Variable:\n\n def __init__(self, data):\n self.data = data\n\n\nclass Function:\n \"\"\"\n Base class\n specific functions are implemented in the inherited class\n \"\"\"\n\n def __call__(self, input):\n x = input.data\n y = self.foward(x)\n output = Variable(y)\n return output\n\n def foward(self, x):\n raise NotImplementedError()\n\n\nclass Square(Function):\n\n def foward(self, x):\n return x ** 2\n\n\nclass Exp(Function):\n\n def foward(self, x):\n return np.exp(x)\n\n\n<mask token>\nprint(y.data)\n",
"step-4": "<mask token>\n\n\nclass Variable:\n\n def __init__(self, data):\n self.data = data\n\n\nclass Function:\n \"\"\"\n Base class\n specific functions are implemented in the inherited class\n \"\"\"\n\n def __call__(self, input):\n x = input.data\n y = self.foward(x)\n output = Variable(y)\n return output\n\n def foward(self, x):\n raise NotImplementedError()\n\n\nclass Square(Function):\n\n def foward(self, x):\n return x ** 2\n\n\nclass Exp(Function):\n\n def foward(self, x):\n return np.exp(x)\n\n\nsquare = Square()\nexp = Exp()\nx = Variable(np.array(0.5))\na = square(x)\nb = exp(a)\ny = square(b)\nprint(y.data)\n",
"step-5": "#implement variable!\nimport numpy as np\n\nclass Variable:\n def __init__(self, data):\n self.data = data\n\nclass Function:\n '''\n Base class\n specific functions are implemented in the inherited class\n '''\n def __call__(self, input): \n x = input.data #data extract\n y = self.foward(x)\n output = Variable(y) #here! is key point\n return output\n\n def foward(self, x):\n raise NotImplementedError()\n\nclass Square(Function):\n def foward(self, x):\n return x ** 2\n\nclass Exp(Function):\n def foward(self, x):\n return np.exp(x)\n\n# input/output of a Function.__call__ is unified as a variable instance.\nsquare = Square()\nexp = Exp()\n\n# like a composite function\n# x -> [Square] -> a -> [Exp] -> b -> [Square] -> y\nx = Variable(np.array(0.5))\na = square(x)\nb = exp(a)\ny = square(b)\nprint(y.data)\n\n\n",
"step-ids": [
6,
10,
11,
12,
14
]
}
|
[
6,
10,
11,
12,
14
] |
from django.core.paginator import Paginator, EmptyPage
from django.shortcuts import render
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin
from logging import getLogger
from django_redis import get_redis_connection
from decimal import Decimal
import json
from django import http
from django.utils import timezone
from django.db import transaction
from users.models import Address
from goods.models import SKU
from meiduo_mall.utils import constants
from meiduo_mall.utils.auth_backend import LoginRequiredJsonMixin
from .models import OrderInfo, OrderGoods
from meiduo_mall.utils.response_code import RETCODE, err_msg
logger = getLogger('django')
class GoodsCommentView(View):
    """Read-only listing of the comments left on a given SKU."""

    def get(self, request, sku_id):
        """Return the most recent comments for `sku_id` as JSON."""
        # Newest first, capped at the configured page size.
        commented = (OrderGoods.objects
                     .filter(sku_id=sku_id, is_commented=True)
                     .order_by('-create_time')[:constants.COMMENTS_LIST_LIMIT])
        comment_list = []
        for goods in commented:
            username = goods.order.user.username
            # Anonymous reviewers are masked as "f***l" (first + last letter).
            shown = username[0] + '***' + username[-1] if goods.is_anonymous else username
            comment_list.append({
                'username': shown,
                'comment': goods.comment,
                'score': goods.score,
            })
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'comment_list': comment_list})
class OrderCommentView(LoginRequiredMixin, View):
    """Create comments on the goods of an order awaiting review."""

    def get(self, request):
        """Render the review page with the order's not-yet-commented goods."""
        # The order must exist and belong to the current user.
        order_id = request.GET.get('order_id')
        try:
            OrderInfo.objects.get(order_id=order_id, user=request.user)
        except OrderInfo.DoesNotExist:
            return http.HttpResponseNotFound('订单不存在')
        # Goods of this order that still lack a comment.
        try:
            uncomment_goods = OrderGoods.objects.filter(order_id=order_id, is_commented=False)
        except Exception as e:
            logger.error(e)
            return http.HttpResponseServerError('订单商品信息出错')
        # Build template-friendly dicts for the pending goods.
        uncomment_goods_list = []
        for goods in uncomment_goods:
            uncomment_goods_list.append({
                'order_id': goods.order.order_id,
                'sku_id': goods.sku.id,
                'name': goods.sku.name,
                'price': str(goods.price),
                'default_image_url': goods.sku.default_image.url,
                'comment': goods.comment,
                'score': goods.score,
                'is_anonymous': str(goods.is_anonymous),
            })
        context = {
            'uncomment_goods_list': uncomment_goods_list
        }
        return render(request, 'goods_judge.html', context)

    def post(self, request):
        """Save one goods comment; marks the order finished once all are commented."""
        # Parse and validate parameters.
        json_dict = json.loads(request.body.decode())
        order_id = json_dict.get('order_id')
        sku_id = json_dict.get('sku_id')
        score = json_dict.get('score')
        comment = json_dict.get('comment')
        is_anonymous = json_dict.get('is_anonymous')
        if not all([order_id, sku_id, score, comment]):
            return http.HttpResponseForbidden('缺少必传参数')
        try:
            # BUGFIX: the original called .filter() here, which never raises
            # DoesNotExist, so the ownership/status check silently always
            # passed. .get() actually enforces it (mirroring get() above).
            OrderInfo.objects.get(order_id=order_id, user=request.user, status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])
        except OrderInfo.DoesNotExist:
            return http.HttpResponseForbidden('参数order_id错误')
        try:
            sku = SKU.objects.get(id=sku_id)
        except SKU.DoesNotExist:
            return http.HttpResponseForbidden('参数sku_id错误')
        if is_anonymous:
            if not isinstance(is_anonymous, bool):
                return http.HttpResponseForbidden('参数is_anonymous错误')
        # All database writes below run as one transaction.
        with transaction.atomic():
            # Savepoint taken before any write so we can roll back cleanly.
            save_id = transaction.savepoint()
            try:
                # Persist the comment on the goods row.
                OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id, is_commented=False).update(
                    comment=comment,
                    score=score,
                    is_anonymous=is_anonymous,
                    is_commented=True
                )
                # Bump the comment counters on both the SKU and its SPU.
                sku.comments += 1
                sku.save()
                sku.spu.comments += 1
                sku.spu.save()
                # Once every goods row is commented, the order is finished.
                if OrderGoods.objects.filter(order_id=order_id, is_commented=False).count() == 0:
                    OrderInfo.objects.filter(order_id=order_id).update(status=OrderInfo.ORDER_STATUS_ENUM['FINISHED'])
            # Unknown database error: roll back everything since the savepoint.
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.COMMITMENTERR, 'errmsg': err_msg[RETCODE.COMMITMENTERR]})
            else:
                # Commit the savepoint and report success.
                transaction.savepoint_commit(save_id)
                return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK]})
class UserOrderInfoView(LoginRequiredMixin, View):
    """'My orders' page."""
    def get(self, request, page_num):
        """Render one page of the current user's orders."""
        user = request.user
        # All of the user's orders, newest first.
        orders = user.orderinfo_set.all().order_by("-create_time")
        # Annotate every order with display fields for the template.
        for order in orders:
            # Human-readable order status (choices are 1-based, hence -1).
            order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status-1][1]
            # Human-readable payment method.
            order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.pay_method-1][1]
            order.sku_list = []
            # Goods rows belonging to this order.
            order_goods = order.skus.all()
            # Attach per-item count and subtotal to each SKU.
            for order_good in order_goods:
                sku = order_good.sku
                sku.count = order_good.count
                sku.amount = sku.price * sku.count
                order.sku_list.append(sku)
        # Paginate the annotated orders.
        page_num = int(page_num)
        try:
            paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)
            page_orders = paginator.page(page_num)
            total_page = paginator.num_pages
        except EmptyPage:
            return http.HttpResponseNotFound('订单不存在')
        context = {
            "page_orders": page_orders,
            'total_page': total_page,
            'page_num': page_num,
        }
        return render(request, "user_center_order.html", context)
class OrderSuccessView(LoginRequiredMixin, View):
    """Order-placed confirmation page."""

    def get(self, request):
        """Render the success page, echoing order details from the query string."""
        params = request.GET
        context = {
            'order_id': params.get('order_id'),
            'payment_amount': params.get('payment_amount'),
            'pay_method': params.get('pay_method'),
        }
        return render(request, 'order_success.html', context)
class OrderCommitView(LoginRequiredJsonMixin, View):
    """Commit an order."""
    def post(self, request):
        """Save the order header and its goods rows inside one transaction."""
        # Receive parameters.
        json_dict = json.loads(request.body.decode())
        address_id = json_dict.get('address_id')
        pay_method = json_dict.get('pay_method')
        # Validate parameters.
        if not all([address_id, pay_method]):
            return http.HttpResponseForbidden('缺少必传参数')
        # address_id must reference an existing address.
        try:
            address = Address.objects.get(id=address_id)
        except Exception as e:
            logger.error(e)
            return http.HttpResponseForbidden('参数address_id错误')
        # pay_method must be one of the supported methods.
        if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:
            return http.HttpResponseForbidden('参数pay_method错误')
        # All database operations below run as a single transaction.
        with transaction.atomic():
            # Savepoint taken before any write so we can roll back cleanly.
            save_id = transaction.savepoint()
            # Current logged-in user.
            user = request.user
            # Order number: timestamp + zero-padded user id, e.g. '2020123113041200000001'.
            order_id = timezone.localtime().strftime('%Y%m%d%H%M%S') + '{:0>9d}'.format(user.id)
            try:
                # Save the order header (the "one" side).
                order = OrderInfo.objects.create(
                    order_id=order_id,
                    user=user,
                    address=address,
                    total_count=0,  # placeholder; accumulated from the goods below
                    total_amount=Decimal('0.00'),  # placeholder; accumulated below
                    freight=Decimal(constants.ORDERS_FREIGHT_COST),
                    pay_method=pay_method,
                    # Alipay orders start UNPAID; cash-on-delivery starts UNSEND.
                    status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.ORDER_STATUS_ENUM['UNSEND']
                )
                # Save the goods rows (the "many" side).
                # Read the checked cart items from redis.
                redis_conn = get_redis_connection('carts')
                # Quantities of every cart item.
                redis_cart = redis_conn.hgetall('carts_%s' % user.id)
                # sku_ids of the checked (selected) items.
                redis_selected = redis_conn.smembers('selected_{}'.format(user.id))
                # Build {sku_id: count} for the checked items only.
                new_cart_dict = {}
                for sku_id in redis_selected:
                    new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])
                # sku ids of the checked items.
                sku_ids = new_cart_dict.keys()
                for sku_id in sku_ids:
                    # Retry each item until it commits or stock runs out.
                    while True:
                        # Re-read the SKU on every attempt; a cached queryset
                        # (e.g. filter(id__in=sku_ids)) would hide concurrent stock changes.
                        sku = SKU.objects.get(id=sku_id)
                        # Quantity requested for this sku.
                        sku_count = new_cart_dict[sku.id]
                        # Snapshot stock and sales for the compare-and-swap update below.
                        origin_stock = sku.stock
                        origin_sales = sku.sales
                        # # Simulate network latency (debug aid).
                        # import time
                        # time.sleep(5)
                        # If the requested quantity exceeds stock, fail the whole order.
                        if sku_count > origin_stock:
                            # Insufficient stock: roll back.
                            transaction.savepoint_rollback(save_id)
                            print(request.user, '库存不足')
                            return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})
                        # Stock suffices: decrement stock, increment sales.
                        new_stock = origin_stock - sku_count
                        new_sales = origin_sales + sku_count
                        result = SKU.objects.filter(id=sku_id, stock=origin_stock).update(stock=new_stock, sales=new_sales)
                        # update() returns 0 when another request changed the row first.
                        if result == 0:
                            # Lost the race to a concurrent order; retry this item.
                            continue
                        # Accumulate SPU sales.
                        sku.spu.sales += sku_count
                        sku.spu.save()
                        OrderGoods.objects.create(
                            order=order,
                            sku=sku,
                            count=sku_count,
                            price=sku.price,
                        )
                        # Accumulate the order totals.
                        order.total_count += sku_count
                        order.total_amount += (sku_count * sku.price)
                        # This item committed successfully; move to the next one.
                        break
                # Add freight and persist the final totals.
                order.total_amount += order.freight
                order.save()
            # Unknown database error: roll back everything since the savepoint.
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR, 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})
            else:
                # Commit the savepoint.
                transaction.savepoint_commit(save_id)
        # Remove the just-ordered items from the cart.
        pl = redis_conn.pipeline()
        pl.hdel('carts_%s' % user.id, *redis_selected)
        pl.srem('selected_%s' % user.id, *redis_selected)
        try:
            pl.execute()
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR, 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})
        else:
            # Respond with the new order id.
            return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'order_id': order_id})
class OrderSettlementView(LoginRequiredMixin, View):
    """Order settlement page: show the checked cart items ready for checkout."""

    def get(self, request):
        """Query and render the data needed to place an order.

        Gathers the user's non-deleted shipping addresses plus the cart
        items currently checked in Redis, and computes per-item subtotals
        and order totals including freight.
        """
        # Current logged-in user (guaranteed by LoginRequiredMixin).
        user = request.user
        # Shipping addresses that have not been soft-deleted.
        try:
            addresses = Address.objects.filter(user=user, is_deleted=False)
        except Exception as e:
            logger.error(e)
            # No addresses available — the template lets the user create one.
            addresses = None
        # Cart data lives in Redis: a hash {sku_id: count} with all cart
        # items, and a set holding the sku_ids that are checked.
        redis_conn = get_redis_connection('carts')
        # Quantities of every item in the cart.
        redis_cart = redis_conn.hgetall('carts_%s' % user.id)
        # sku_ids of the checked items.
        redis_selected = redis_conn.smembers('selected_{}'.format(user.id))
        # Build {sku_id(int): count(int)} for the checked items only.
        # NOTE(review): assumes every member of the "selected" set also has
        # an entry in the cart hash — a missing key would raise KeyError.
        new_cart_dict = {}
        for sku_id in redis_selected:
            new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])
        # sku_ids of the checked items.
        sku_ids = new_cart_dict.keys()
        # SKU rows for the checked items.
        skus = SKU.objects.filter(id__in=sku_ids)
        # Running totals for item count and amount.
        total_count = 0
        # Construct from a string literal so the Decimal is exact —
        # Decimal(0.00) would carry binary-float noise into price arithmetic.
        total_amount = Decimal('0.00')
        for sku in skus:
            # Attach transient count (quantity) and amount (subtotal)
            # attributes for the template to render.
            sku.count = new_cart_dict[sku.id]
            sku.amount = sku.price * sku.count  # Decimal arithmetic
            total_count += sku.count
            total_amount += sku.amount
        # Build the template context.
        context = {
            'addresses': addresses,
            'skus': skus,
            'total_count': total_count,
            'total_amount': total_amount,
            'freight': constants.ORDERS_FREIGHT_COST,  # shipping cost
            'payment_amount': Decimal(constants.ORDERS_FREIGHT_COST) + total_amount,
        }
        return render(request, 'place_order.html', context)
|
normal
|
{
"blob_id": "0402096f215ae600318d17bc70e5e3067b0a176b",
"index": 3864,
"step-1": "<mask token>\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n context = {'order_id': order_id, 'payment_amount': payment_amount,\n 'pay_method': pay_method}\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo\n .PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n user = request.user\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S'\n ) + '{:0>9d}'.format(user.id)\n try:\n order = OrderInfo.objects.create(order_id=order_id, user=\n user, address=address, total_count=0, total_amount=\n Decimal('0.00'), freight=Decimal(constants.\n ORDERS_FREIGHT_COST), pay_method=pay_method, status=\n OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method ==\n OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.\n ORDER_STATUS_ENUM['UNSEND'])\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(\n user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n while 
True:\n sku = SKU.objects.get(id=sku_id)\n sku_count = new_cart_dict[sku.id]\n origin_stock = sku.stock\n origin_sales = sku.sales\n if sku_count > origin_stock:\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.\n STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=\n origin_stock).update(stock=new_stock, sales=\n new_sales)\n if result == 0:\n continue\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(order=order, sku=sku,\n count=sku_count, price=sku.price)\n order.total_count += sku_count\n order.total_amount += sku_count * sku.price\n break\n order.total_amount += order.freight\n order.save()\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR,\n 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n transaction.savepoint_commit(save_id)\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR,\n 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg\n [RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n user = request.user\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n addresses = None\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n 
new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n skus = SKU.objects.filter(id__in=sku_ids)\n total_count = 0\n total_amount = Decimal(0.0)\n for sku in skus:\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count\n total_count += sku.count\n total_amount += sku.amount\n context = {'addresses': addresses, 'skus': skus, 'total_count':\n total_count, 'total_amount': total_amount, 'freight': constants\n .ORDERS_FREIGHT_COST, 'payment_amount': Decimal(constants.\n ORDERS_FREIGHT_COST) + total_amount}\n return render(request, 'place_order.html', context)\n",
"step-2": "<mask token>\n\n\nclass OrderCommentView(LoginRequiredMixin, View):\n \"\"\"订单商品评价\"\"\"\n\n def get(self, request):\n \"\"\"展示商品评价页面\"\"\"\n order_id = request.GET.get('order_id')\n try:\n OrderInfo.objects.get(order_id=order_id, user=request.user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseNotFound('订单不存在')\n try:\n uncomment_goods = OrderGoods.objects.filter(order_id=order_id,\n is_commented=False)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseServerError('订单商品信息出错')\n uncomment_goods_list = []\n for goods in uncomment_goods:\n uncomment_goods_list.append({'order_id': goods.order.order_id,\n 'sku_id': goods.sku.id, 'name': goods.sku.name, 'price':\n str(goods.price), 'default_image_url': goods.sku.\n default_image.url, 'comment': goods.comment, 'score': goods\n .score, 'is_anonymous': str(goods.is_anonymous)})\n context = {'uncomment_goods_list': uncomment_goods_list}\n return render(request, 'goods_judge.html', context)\n\n def post(self, request):\n \"\"\"评价订单商品\"\"\"\n json_dict = json.loads(request.body.decode())\n order_id = json_dict.get('order_id')\n sku_id = json_dict.get('sku_id')\n score = json_dict.get('score')\n comment = json_dict.get('comment')\n is_anonymous = json_dict.get('is_anonymous')\n if not all([order_id, sku_id, score, comment]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n OrderInfo.objects.filter(order_id=order_id, user=request.user,\n status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('参数order_id错误')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.HttpResponseForbidden('参数sku_id错误')\n if is_anonymous:\n if not isinstance(is_anonymous, bool):\n return http.HttpResponseForbidden('参数is_anonymous错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n try:\n OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id,\n is_commented=False).update(comment=comment, 
score=score,\n is_anonymous=is_anonymous, is_commented=True)\n sku.comments += 1\n sku.save()\n sku.spu.comments += 1\n sku.spu.save()\n if OrderGoods.objects.filter(order_id=order_id,\n is_commented=False).count() == 0:\n OrderInfo.objects.filter(order_id=order_id).update(status\n =OrderInfo.ORDER_STATUS_ENUM['FINISHED'])\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.COMMITMENTERR,\n 'errmsg': err_msg[RETCODE.COMMITMENTERR]})\n else:\n transaction.savepoint_commit(save_id)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[\n RETCODE.OK]})\n\n\nclass UserOrderInfoView(LoginRequiredMixin, View):\n \"\"\"我的订单\"\"\"\n\n def get(self, request, page_num):\n \"\"\"提供我的订单页面\"\"\"\n user = request.user\n orders = user.orderinfo_set.all().order_by('-create_time')\n for order in orders:\n order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status - 1\n ][1]\n order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.\n pay_method - 1][1]\n order.sku_list = []\n order_goods = order.skus.all()\n for order_good in order_goods:\n sku = order_good.sku\n sku.count = order_good.count\n sku.amount = sku.price * sku.count\n order.sku_list.append(sku)\n page_num = int(page_num)\n try:\n paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)\n page_orders = paginator.page(page_num)\n total_page = paginator.num_pages\n except EmptyPage:\n return http.HttpResponseNotFound('订单不存在')\n context = {'page_orders': page_orders, 'total_page': total_page,\n 'page_num': page_num}\n return render(request, 'user_center_order.html', context)\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n context = {'order_id': order_id, 'payment_amount': payment_amount,\n 'pay_method': 
pay_method}\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo\n .PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n user = request.user\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S'\n ) + '{:0>9d}'.format(user.id)\n try:\n order = OrderInfo.objects.create(order_id=order_id, user=\n user, address=address, total_count=0, total_amount=\n Decimal('0.00'), freight=Decimal(constants.\n ORDERS_FREIGHT_COST), pay_method=pay_method, status=\n OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method ==\n OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.\n ORDER_STATUS_ENUM['UNSEND'])\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(\n user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n while True:\n sku = SKU.objects.get(id=sku_id)\n sku_count = new_cart_dict[sku.id]\n origin_stock = sku.stock\n origin_sales = sku.sales\n if sku_count > origin_stock:\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.\n STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n new_stock = origin_stock - sku_count\n 
new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=\n origin_stock).update(stock=new_stock, sales=\n new_sales)\n if result == 0:\n continue\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(order=order, sku=sku,\n count=sku_count, price=sku.price)\n order.total_count += sku_count\n order.total_amount += sku_count * sku.price\n break\n order.total_amount += order.freight\n order.save()\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR,\n 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n transaction.savepoint_commit(save_id)\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR,\n 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg\n [RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n user = request.user\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n addresses = None\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n skus = SKU.objects.filter(id__in=sku_ids)\n total_count = 0\n total_amount = Decimal(0.0)\n for sku in skus:\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count\n total_count += sku.count\n total_amount += sku.amount\n context = {'addresses': addresses, 'skus': skus, 
'total_count':\n total_count, 'total_amount': total_amount, 'freight': constants\n .ORDERS_FREIGHT_COST, 'payment_amount': Decimal(constants.\n ORDERS_FREIGHT_COST) + total_amount}\n return render(request, 'place_order.html', context)\n",
"step-3": "<mask token>\n\n\nclass GoodsCommentView(View):\n <mask token>\n <mask token>\n\n\nclass OrderCommentView(LoginRequiredMixin, View):\n \"\"\"订单商品评价\"\"\"\n\n def get(self, request):\n \"\"\"展示商品评价页面\"\"\"\n order_id = request.GET.get('order_id')\n try:\n OrderInfo.objects.get(order_id=order_id, user=request.user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseNotFound('订单不存在')\n try:\n uncomment_goods = OrderGoods.objects.filter(order_id=order_id,\n is_commented=False)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseServerError('订单商品信息出错')\n uncomment_goods_list = []\n for goods in uncomment_goods:\n uncomment_goods_list.append({'order_id': goods.order.order_id,\n 'sku_id': goods.sku.id, 'name': goods.sku.name, 'price':\n str(goods.price), 'default_image_url': goods.sku.\n default_image.url, 'comment': goods.comment, 'score': goods\n .score, 'is_anonymous': str(goods.is_anonymous)})\n context = {'uncomment_goods_list': uncomment_goods_list}\n return render(request, 'goods_judge.html', context)\n\n def post(self, request):\n \"\"\"评价订单商品\"\"\"\n json_dict = json.loads(request.body.decode())\n order_id = json_dict.get('order_id')\n sku_id = json_dict.get('sku_id')\n score = json_dict.get('score')\n comment = json_dict.get('comment')\n is_anonymous = json_dict.get('is_anonymous')\n if not all([order_id, sku_id, score, comment]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n OrderInfo.objects.filter(order_id=order_id, user=request.user,\n status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('参数order_id错误')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.HttpResponseForbidden('参数sku_id错误')\n if is_anonymous:\n if not isinstance(is_anonymous, bool):\n return http.HttpResponseForbidden('参数is_anonymous错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n try:\n OrderGoods.objects.filter(order_id=order_id, 
sku_id=sku_id,\n is_commented=False).update(comment=comment, score=score,\n is_anonymous=is_anonymous, is_commented=True)\n sku.comments += 1\n sku.save()\n sku.spu.comments += 1\n sku.spu.save()\n if OrderGoods.objects.filter(order_id=order_id,\n is_commented=False).count() == 0:\n OrderInfo.objects.filter(order_id=order_id).update(status\n =OrderInfo.ORDER_STATUS_ENUM['FINISHED'])\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.COMMITMENTERR,\n 'errmsg': err_msg[RETCODE.COMMITMENTERR]})\n else:\n transaction.savepoint_commit(save_id)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[\n RETCODE.OK]})\n\n\nclass UserOrderInfoView(LoginRequiredMixin, View):\n \"\"\"我的订单\"\"\"\n\n def get(self, request, page_num):\n \"\"\"提供我的订单页面\"\"\"\n user = request.user\n orders = user.orderinfo_set.all().order_by('-create_time')\n for order in orders:\n order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status - 1\n ][1]\n order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.\n pay_method - 1][1]\n order.sku_list = []\n order_goods = order.skus.all()\n for order_good in order_goods:\n sku = order_good.sku\n sku.count = order_good.count\n sku.amount = sku.price * sku.count\n order.sku_list.append(sku)\n page_num = int(page_num)\n try:\n paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)\n page_orders = paginator.page(page_num)\n total_page = paginator.num_pages\n except EmptyPage:\n return http.HttpResponseNotFound('订单不存在')\n context = {'page_orders': page_orders, 'total_page': total_page,\n 'page_num': page_num}\n return render(request, 'user_center_order.html', context)\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n context = {'order_id': 
order_id, 'payment_amount': payment_amount,\n 'pay_method': pay_method}\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo\n .PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n user = request.user\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S'\n ) + '{:0>9d}'.format(user.id)\n try:\n order = OrderInfo.objects.create(order_id=order_id, user=\n user, address=address, total_count=0, total_amount=\n Decimal('0.00'), freight=Decimal(constants.\n ORDERS_FREIGHT_COST), pay_method=pay_method, status=\n OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method ==\n OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.\n ORDER_STATUS_ENUM['UNSEND'])\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(\n user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n while True:\n sku = SKU.objects.get(id=sku_id)\n sku_count = new_cart_dict[sku.id]\n origin_stock = sku.stock\n origin_sales = sku.sales\n if sku_count > origin_stock:\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.\n STOCKERR, 'errmsg': 
err_msg[RETCODE.STOCKERR]})\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=\n origin_stock).update(stock=new_stock, sales=\n new_sales)\n if result == 0:\n continue\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(order=order, sku=sku,\n count=sku_count, price=sku.price)\n order.total_count += sku_count\n order.total_amount += sku_count * sku.price\n break\n order.total_amount += order.freight\n order.save()\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR,\n 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n transaction.savepoint_commit(save_id)\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR,\n 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg\n [RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n user = request.user\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n addresses = None\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n skus = SKU.objects.filter(id__in=sku_ids)\n total_count = 0\n total_amount = Decimal(0.0)\n for sku in skus:\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count\n total_count += sku.count\n 
total_amount += sku.amount\n context = {'addresses': addresses, 'skus': skus, 'total_count':\n total_count, 'total_amount': total_amount, 'freight': constants\n .ORDERS_FREIGHT_COST, 'payment_amount': Decimal(constants.\n ORDERS_FREIGHT_COST) + total_amount}\n return render(request, 'place_order.html', context)\n",
"step-4": "<mask token>\n\n\nclass GoodsCommentView(View):\n \"\"\"订单商品评价信息\"\"\"\n\n def get(self, request, sku_id):\n order_goods_list = OrderGoods.objects.filter(sku_id=sku_id,\n is_commented=True).order_by('-create_time')[:constants.\n COMMENTS_LIST_LIMIT]\n comment_list = []\n for order_goods in order_goods_list:\n username = order_goods.order.user.username\n comment_list.append({'username': username[0] + '***' + username\n [-1] if order_goods.is_anonymous else username, 'comment':\n order_goods.comment, 'score': order_goods.score})\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[\n RETCODE.OK], 'comment_list': comment_list})\n\n\nclass OrderCommentView(LoginRequiredMixin, View):\n \"\"\"订单商品评价\"\"\"\n\n def get(self, request):\n \"\"\"展示商品评价页面\"\"\"\n order_id = request.GET.get('order_id')\n try:\n OrderInfo.objects.get(order_id=order_id, user=request.user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseNotFound('订单不存在')\n try:\n uncomment_goods = OrderGoods.objects.filter(order_id=order_id,\n is_commented=False)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseServerError('订单商品信息出错')\n uncomment_goods_list = []\n for goods in uncomment_goods:\n uncomment_goods_list.append({'order_id': goods.order.order_id,\n 'sku_id': goods.sku.id, 'name': goods.sku.name, 'price':\n str(goods.price), 'default_image_url': goods.sku.\n default_image.url, 'comment': goods.comment, 'score': goods\n .score, 'is_anonymous': str(goods.is_anonymous)})\n context = {'uncomment_goods_list': uncomment_goods_list}\n return render(request, 'goods_judge.html', context)\n\n def post(self, request):\n \"\"\"评价订单商品\"\"\"\n json_dict = json.loads(request.body.decode())\n order_id = json_dict.get('order_id')\n sku_id = json_dict.get('sku_id')\n score = json_dict.get('score')\n comment = json_dict.get('comment')\n is_anonymous = json_dict.get('is_anonymous')\n if not all([order_id, sku_id, score, comment]):\n return 
http.HttpResponseForbidden('缺少必传参数')\n try:\n OrderInfo.objects.filter(order_id=order_id, user=request.user,\n status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('参数order_id错误')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.HttpResponseForbidden('参数sku_id错误')\n if is_anonymous:\n if not isinstance(is_anonymous, bool):\n return http.HttpResponseForbidden('参数is_anonymous错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n try:\n OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id,\n is_commented=False).update(comment=comment, score=score,\n is_anonymous=is_anonymous, is_commented=True)\n sku.comments += 1\n sku.save()\n sku.spu.comments += 1\n sku.spu.save()\n if OrderGoods.objects.filter(order_id=order_id,\n is_commented=False).count() == 0:\n OrderInfo.objects.filter(order_id=order_id).update(status\n =OrderInfo.ORDER_STATUS_ENUM['FINISHED'])\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.COMMITMENTERR,\n 'errmsg': err_msg[RETCODE.COMMITMENTERR]})\n else:\n transaction.savepoint_commit(save_id)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[\n RETCODE.OK]})\n\n\nclass UserOrderInfoView(LoginRequiredMixin, View):\n \"\"\"我的订单\"\"\"\n\n def get(self, request, page_num):\n \"\"\"提供我的订单页面\"\"\"\n user = request.user\n orders = user.orderinfo_set.all().order_by('-create_time')\n for order in orders:\n order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status - 1\n ][1]\n order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.\n pay_method - 1][1]\n order.sku_list = []\n order_goods = order.skus.all()\n for order_good in order_goods:\n sku = order_good.sku\n sku.count = order_good.count\n sku.amount = sku.price * sku.count\n order.sku_list.append(sku)\n page_num = int(page_num)\n try:\n paginator = Paginator(orders, 
constants.ORDERS_LIST_LIMIT)\n page_orders = paginator.page(page_num)\n total_page = paginator.num_pages\n except EmptyPage:\n return http.HttpResponseNotFound('订单不存在')\n context = {'page_orders': page_orders, 'total_page': total_page,\n 'page_num': page_num}\n return render(request, 'user_center_order.html', context)\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n context = {'order_id': order_id, 'payment_amount': payment_amount,\n 'pay_method': pay_method}\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo\n .PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n user = request.user\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S'\n ) + '{:0>9d}'.format(user.id)\n try:\n order = OrderInfo.objects.create(order_id=order_id, user=\n user, address=address, total_count=0, total_amount=\n Decimal('0.00'), freight=Decimal(constants.\n ORDERS_FREIGHT_COST), pay_method=pay_method, status=\n OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method ==\n OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.\n ORDER_STATUS_ENUM['UNSEND'])\n redis_conn = get_redis_connection('carts')\n 
redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(\n user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n while True:\n sku = SKU.objects.get(id=sku_id)\n sku_count = new_cart_dict[sku.id]\n origin_stock = sku.stock\n origin_sales = sku.sales\n if sku_count > origin_stock:\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.\n STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=\n origin_stock).update(stock=new_stock, sales=\n new_sales)\n if result == 0:\n continue\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(order=order, sku=sku,\n count=sku_count, price=sku.price)\n order.total_count += sku_count\n order.total_amount += sku_count * sku.price\n break\n order.total_amount += order.freight\n order.save()\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR,\n 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n transaction.savepoint_commit(save_id)\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR,\n 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg\n [RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n user = request.user\n try:\n addresses = Address.objects.filter(user=user, 
is_deleted=False)\n except Exception as e:\n logger.error(e)\n addresses = None\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n skus = SKU.objects.filter(id__in=sku_ids)\n total_count = 0\n total_amount = Decimal(0.0)\n for sku in skus:\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count\n total_count += sku.count\n total_amount += sku.amount\n context = {'addresses': addresses, 'skus': skus, 'total_count':\n total_count, 'total_amount': total_amount, 'freight': constants\n .ORDERS_FREIGHT_COST, 'payment_amount': Decimal(constants.\n ORDERS_FREIGHT_COST) + total_amount}\n return render(request, 'place_order.html', context)\n",
"step-5": "from django.core.paginator import Paginator, EmptyPage\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom logging import getLogger\nfrom django_redis import get_redis_connection\nfrom decimal import Decimal\nimport json\nfrom django import http\nfrom django.utils import timezone\nfrom django.db import transaction\n\nfrom users.models import Address\nfrom goods.models import SKU\nfrom meiduo_mall.utils import constants\nfrom meiduo_mall.utils.auth_backend import LoginRequiredJsonMixin\nfrom .models import OrderInfo, OrderGoods\nfrom meiduo_mall.utils.response_code import RETCODE, err_msg\n\n\nlogger = getLogger('django')\n\n\nclass GoodsCommentView(View):\n \"\"\"订单商品评价信息\"\"\"\n def get(self, request, sku_id):\n # 获取被评价的订单商品信息\n order_goods_list = OrderGoods.objects.filter(sku_id=sku_id, is_commented=True).order_by('-create_time')[:constants.COMMENTS_LIST_LIMIT]\n # 序列化\n comment_list = []\n for order_goods in order_goods_list:\n username = order_goods.order.user.username\n comment_list.append({\n 'username': username[0] + '***' + username[-1] if order_goods.is_anonymous else username,\n 'comment': order_goods.comment,\n 'score': order_goods.score,\n })\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'comment_list': comment_list})\n\n\nclass OrderCommentView(LoginRequiredMixin, View):\n \"\"\"订单商品评价\"\"\"\n def get(self, request):\n \"\"\"展示商品评价页面\"\"\"\n # 接收参数\n order_id = request.GET.get('order_id')\n # 校验参数\n try:\n OrderInfo.objects.get(order_id=order_id, user=request.user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseNotFound('订单不存在')\n # 查询订单中未被评价的商品信息\n try:\n uncomment_goods = OrderGoods.objects.filter(order_id=order_id, is_commented=False)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseServerError('订单商品信息出错')\n # 构造待评价商品数据\n uncomment_goods_list = []\n for goods in uncomment_goods:\n 
uncomment_goods_list.append({\n 'order_id': goods.order.order_id,\n 'sku_id': goods.sku.id,\n 'name': goods.sku.name,\n 'price': str(goods.price),\n 'default_image_url': goods.sku.default_image.url,\n 'comment': goods.comment,\n 'score': goods.score,\n 'is_anonymous': str(goods.is_anonymous),\n })\n # 渲染模板\n context = {\n 'uncomment_goods_list': uncomment_goods_list\n }\n return render(request, 'goods_judge.html', context)\n\n def post(self, request):\n \"\"\"评价订单商品\"\"\"\n # 接收参数\n json_dict = json.loads(request.body.decode())\n order_id = json_dict.get('order_id')\n sku_id = json_dict.get('sku_id')\n score = json_dict.get('score')\n comment = json_dict.get('comment')\n is_anonymous = json_dict.get('is_anonymous')\n # 校验参数\n if not all([order_id, sku_id, score, comment]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n OrderInfo.objects.filter(order_id=order_id, user=request.user, status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('参数order_id错误')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.HttpResponseForbidden('参数sku_id错误')\n if is_anonymous:\n if not isinstance(is_anonymous, bool):\n return http.HttpResponseForbidden('参数is_anonymous错误')\n # 以下操作数据库的操作,开启作为一次事务\n with transaction.atomic():\n # 在数据库操作前,创建保存点(数据库最初的状态)\n save_id = transaction.savepoint()\n try:\n # 保存订单商品评价数据\n OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id, is_commented=False).update(\n comment=comment,\n score=score,\n is_anonymous=is_anonymous,\n is_commented=True\n )\n # 累计评论数据\n sku.comments += 1\n sku.save()\n sku.spu.comments += 1\n sku.spu.save()\n # 如果所有订单商品都已评价,则修改订单状态为已完成\n if OrderGoods.objects.filter(order_id=order_id, is_commented=False).count() == 0:\n OrderInfo.objects.filter(order_id=order_id).update(status=OrderInfo.ORDER_STATUS_ENUM['FINISHED'])\n # 对于未知的数据库错误,暴力回滚\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return 
http.JsonResponse({'code': RETCODE.COMMITMENTERR, 'errmsg': err_msg[RETCODE.COMMITMENTERR]})\n else:\n # 提交事务\n transaction.savepoint_commit(save_id)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK]})\n\n\nclass UserOrderInfoView(LoginRequiredMixin, View):\n \"\"\"我的订单\"\"\"\n\n def get(self, request, page_num):\n \"\"\"提供我的订单页面\"\"\"\n user = request.user\n # 查询订单\n orders = user.orderinfo_set.all().order_by(\"-create_time\")\n # 遍历所有订单\n for order in orders:\n # 绑定订单状态\n order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status-1][1]\n # 绑定支付方式\n order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.pay_method-1][1]\n order.sku_list = []\n # 查询订单商品\n order_goods = order.skus.all()\n # 遍历订单商品\n for order_good in order_goods:\n sku = order_good.sku\n sku.count = order_good.count\n sku.amount = sku.price * sku.count\n order.sku_list.append(sku)\n # 分页\n page_num = int(page_num)\n try:\n paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)\n page_orders = paginator.page(page_num)\n total_page = paginator.num_pages\n except EmptyPage:\n return http.HttpResponseNotFound('订单不存在')\n context = {\n \"page_orders\": page_orders,\n 'total_page': total_page,\n 'page_num': page_num,\n }\n return render(request, \"user_center_order.html\", context)\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n # 接受参数\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n # 构造上下文\n context = {\n 'order_id': order_id,\n 'payment_amount': payment_amount,\n 'pay_method': pay_method\n }\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n # 接收参数\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = 
json_dict.get('pay_method')\n # 校验参数\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n # 判断address_id是否合法\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n # 判断pay_method是否合法\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n # 以下操作数据库的操作,开启作为一次事务\n with transaction.atomic():\n # 在数据库操作前,创建保存点(数据库最初的状态)\n save_id = transaction.savepoint()\n # 获取登录用户\n user = request.user\n # 获取订单编号:时间 + user_id == '2020123113041200000001'\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S') + '{:0>9d}'.format(user.id)\n try:\n # 保存订单基本信息(一)\n order = OrderInfo.objects.create(\n order_id=order_id,\n user=user,\n address=address,\n total_count=0, # 仅用来初始化,后面根据订单中的商品进行更新\n total_amount=Decimal('0.00'), # 仅用来初始化,后面根据订单中的商品进行更新\n freight=Decimal(constants.ORDERS_FREIGHT_COST),\n pay_method=pay_method,\n # 如果支付方式为支付宝,支付状态为未付款,如果支付方式是货到付款,支付状态为未发货\n status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.ORDER_STATUS_ENUM['UNSEND']\n )\n\n # 保存订单商品信息(多)\n # 查询redis中购物车被勾选的商品\n redis_conn = get_redis_connection('carts')\n # 购物车中商品的数量\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n # 被勾选的商品sku_id\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n # 构造购物车中被勾选商品的数据 new_cart_dict,{sku_id: 2, sku_id: 1}\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n # 获取被勾选商品的sku_id\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n # 每个商品都有多次下单的机会,直到库存不足\n while True:\n # 读取商品的sku信息\n sku = SKU.objects.get(id=sku_id) # 查询商品和库存信息时,不能出现缓存,所有不用 filter(id__in=sku_ids)\n # 获取当前被勾选商品的库存\n sku_count = new_cart_dict[sku.id]\n # 获取sku商品原始的库存stock和销量sales\n origin_stock = sku.stock\n origin_sales = sku.sales\n # # 
模型网络延迟\n # import time\n # time.sleep(5)\n # 如果订单中的商品数量大于库存,响应库存不足\n if sku_count > origin_stock:\n # 库存不足,回滚\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n # 如果库存满足,SKU 减库存,加销量\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=origin_stock).update(stock=new_stock, sales=new_sales)\n # 如果在更新数据时,原始数据变化了,那么返回0,表示有资源抢夺\n if result == 0:\n # 由于其他用户提前对该商品完成下单,该商品此次下单失败,重新进行下单\n continue\n # SPU 加销量\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(\n order=order,\n sku=sku,\n count=sku_count,\n price=sku.price,\n )\n # 累加订单中商品的总价和总数量\n order.total_count += sku_count\n order.total_amount += (sku_count * sku.price)\n # 该件商品下单成功,退出循环\n break\n # 添加邮费和保存订单信息\n order.total_amount += order.freight\n order.save()\n # 对于未知的数据库错误,暴力回滚\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR, 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n # 提交事务\n transaction.savepoint_commit(save_id)\n # 清除购物车中已结算的商品\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR, 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n # 返回响应\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n # 获取登录用户\n user = request.user\n # 查询用户收货地址,没有被删除的收货地址\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n # 如果没有查询出收货地址,可以去编辑收货地址\n addresses = None\n # 
查询redis中购物车被勾选的商品\n redis_conn = get_redis_connection('carts')\n # 购物车中商品的数量\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n # 被勾选的商品sku_id\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n # 构造购物车中被勾选商品的数据 new_cart_dict,{sku_id: 2, sku_id: 1}\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n # 获取被勾选商品的sku_id\n sku_ids = new_cart_dict.keys()\n # 获取被勾选商品的sku信息\n skus = SKU.objects.filter(id__in=sku_ids)\n # 商品总数量与商品总金额\n total_count = 0\n total_amount = Decimal(0.00) # 或 Decimal('0.00')\n for sku in skus:\n # 遍历skus,给每个sku补充count(数量)和amount(小计)字段\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count # Decimal类型\n # 累加商品数量和金额\n total_count += sku.count\n total_amount += sku.amount\n # 构造上下文\n context = {\n 'addresses': addresses,\n 'skus': skus,\n 'total_count': total_count,\n 'total_amount': total_amount,\n 'freight': constants.ORDERS_FREIGHT_COST, # 运费\n 'payment_amount': Decimal(constants.ORDERS_FREIGHT_COST) + total_amount,\n }\n return render(request, 'place_order.html', context)\n",
"step-ids": [
9,
16,
17,
19,
22
]
}
|
[
9,
16,
17,
19,
22
] |
import math  # h = g^x (mod p): baby-step giant-step discrete logarithm


def bsgs(h, g, p):
    """Return x such that g**x == h (mod p), or None if none is found.

    Giant-step-table variant: precompute g^(j*m) mod p for j in [0, m),
    then strip one factor of g from h per iteration; a table hit at step
    i means h * g^(-i) == g^(j*m) (mod p), i.e. x = j*m + i.

    Assumes p is prime (the modular inverse of g is computed via
    Fermat's little theorem).
    """
    m = int(math.ceil(math.sqrt(p)))
    # Giant steps: aj[j] = g^(j*m) mod p.  Reducing mod p is essential —
    # the original omitted the modulus, so the unreduced powers could
    # never equal gamma (which is kept reduced mod p).
    aj = [pow(g, j * m, p) for j in range(m)]
    # g^(p-2) == g^(-1) (mod p) by Fermat's little theorem.
    g_inv = pow(g, p - 2, p)
    gamma = h % p
    for i in range(m):
        if gamma in aj:
            return aj.index(gamma) * m + i
        gamma = (gamma * g_inv) % p
    return None


if __name__ == "__main__":
    # input() returns str on Python 3; convert before doing arithmetic.
    h = int(input("h: "))
    g = int(input("g: "))
    p = int(input("p: "))
    print(bsgs(h, g, p))
|
normal
|
{
"blob_id": "94439ffe3303f5efe15562f26d693e1e7a8115df",
"index": 2009,
"step-1": "import math #h=g^x\nh=input(\"h: \")\ng=input(\"g: \")\np=input(\"p: \")\nm=math.ceil(math.sqrt(p))\nm=int(m)\naj=[0]*m\nfor i in range(m):\n aj[i]=pow(g,i*m)\nainvm=pow(g,p-2,p)\ngamma = h\nfor i in range(m):\n if gamma in aj:\n \tj = aj.index(gamma)\n print (j*m)+i\n break\n gamma=(gamma*ainvm)%p\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'ApplicationCredential',
'ApplicationTag',
]
@pulumi.output_type
class ApplicationCredential(dict):
    """Credential configuration for an application.

    Dict-backed Pulumi output type: values are stored under snake_case
    keys, and access through the deprecated camelCase keys emits a
    warning pointing at the property getter instead.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map the known deprecated camelCase keys to their snake_case
        # property names; warn only when a mapping exists.
        suggest = None
        if key == "credentialType":
            suggest = "credential_type"
        elif key == "databaseName":
            suggest = "database_name"
        elif key == "secretId":
            suggest = "secret_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn on deprecated camelCase access, then defer to plain dict lookup.
        ApplicationCredential.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Warn on deprecated camelCase access, then defer to dict.get.
        ApplicationCredential.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 credential_type: Optional['ApplicationCredentialCredentialType'] = None,
                 database_name: Optional[str] = None,
                 secret_id: Optional[str] = None):
        # Only explicitly supplied values are stored; omitted keys stay unset.
        if credential_type is not None:
            pulumi.set(__self__, "credential_type", credential_type)
        if database_name is not None:
            pulumi.set(__self__, "database_name", database_name)
        if secret_id is not None:
            pulumi.set(__self__, "secret_id", secret_id)
    @property
    @pulumi.getter(name="credentialType")
    def credential_type(self) -> Optional['ApplicationCredentialCredentialType']:
        """Return the stored 'credential_type' value, if any."""
        return pulumi.get(self, "credential_type")
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> Optional[str]:
        """Return the stored 'database_name' value, if any."""
        return pulumi.get(self, "database_name")
    @property
    @pulumi.getter(name="secretId")
    def secret_id(self) -> Optional[str]:
        """Return the stored 'secret_id' value, if any."""
        return pulumi.get(self, "secret_id")
@pulumi.output_type
class ApplicationTag(dict):
    """
    A key-value pair to associate with a resource.
    """
    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        A key-value pair to associate with a resource.
        :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        # Both fields are required; unlike ApplicationCredential there are
        # no optional keys, so both are always stored.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")
|
normal
|
{
"blob_id": "8535020e7157699310b3412fe6c5a28ee8e61f49",
"index": 6911,
"step-1": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n <mask token>\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n <mask token>\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. 
You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-2": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n <mask token>\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. 
\n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-3": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default=None) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. 
\n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-4": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n\n @staticmethod\n def __key_warning(key: str):\n suggest = None\n if key == 'credentialType':\n suggest = 'credential_type'\n elif key == 'databaseName':\n suggest = 'database_name'\n elif key == 'secretId':\n suggest = 'secret_id'\n if suggest:\n pulumi.log.warn(\n f\"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.\"\n )\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default=None) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. 
You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-5": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi SDK Generator. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport copy\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom .. import _utilities\nfrom ._enums import *\n\n__all__ = [\n 'ApplicationCredential',\n 'ApplicationTag',\n]\n\[email protected]_type\nclass ApplicationCredential(dict):\n @staticmethod\n def __key_warning(key: str):\n suggest = None\n if key == \"credentialType\":\n suggest = \"credential_type\"\n elif key == \"databaseName\":\n suggest = \"database_name\"\n elif key == \"secretId\":\n suggest = \"secret_id\"\n\n if suggest:\n pulumi.log.warn(f\"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.\")\n\n def __getitem__(self, key: str) -> Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default = None) -> Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *,\n credential_type: Optional['ApplicationCredentialCredentialType'] = None,\n database_name: Optional[str] = None,\n secret_id: Optional[str] = None):\n if credential_type is not None:\n pulumi.set(__self__, \"credential_type\", credential_type)\n if database_name is not None:\n pulumi.set(__self__, \"database_name\", database_name)\n if secret_id is not None:\n pulumi.set(__self__, \"secret_id\", secret_id)\n\n @property\n @pulumi.getter(name=\"credentialType\")\n def credential_type(self) -> Optional['ApplicationCredentialCredentialType']:\n return pulumi.get(self, \"credential_type\")\n\n @property\n @pulumi.getter(name=\"databaseName\")\n def database_name(self) -> Optional[str]:\n return pulumi.get(self, \"database_name\")\n\n @property\n @pulumi.getter(name=\"secretId\")\n def secret_id(self) -> 
Optional[str]:\n return pulumi.get(self, \"secret_id\")\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n def __init__(__self__, *,\n key: str,\n value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)\n\n @property\n @pulumi.getter\n def key(self) -> str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, \"key\")\n\n @property\n @pulumi.getter\n def value(self) -> str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, \"value\")\n\n\n",
"step-ids": [
10,
11,
12,
13,
16
]
}
|
[
10,
11,
12,
13,
16
] |
# Interactive gradebook: collect student names and two grades each,
# list averages, then let the user look up individual grades by ID.
student = []
while True:
    name = str(input('Name: ')).capitalize().strip()
    grade1 = float(input('Grade 1: '))
    grade2 = float(input('Grade 2: '))
    avgrade = (grade1 + grade2) / 2
    # Each record is [name, [grade1, grade2], average].
    student.append([name, [grade1, grade2], avgrade])
    resp = ''
    # Re-prompt until exactly one of Y/y/N/n is entered.  The original
    # test `resp not in 'NnYy'` accepted '' (an empty string is a
    # substring of any string), so a bare Enter slipped through.
    while resp not in ('N', 'n', 'Y', 'y'):
        resp = str(input('Another student? [Y/N]')).strip()
    # BUG FIX: the original only broke on uppercase 'N', so answering
    # 'n' kept prompting for more students forever.
    if resp in ('N', 'n'):
        break
print('-=' * 15)
print(f'{"No.":<4}{"Name:":<10}{"Average Grade:":>8}')
print('-=' * 15)
for i, a in enumerate(student):
    print(f'{i:<4}{a[0]:<8}{a[2]:>8.1f}')
while True:
    print('-=' * 20)
    opt = int(input('Enter the student ID to show the grades: (999 to exit) '))
    if opt == 999:
        print('Exiting...')
        break
    # BUG FIX: the original allowed negative IDs, which silently indexed
    # from the end of the list; also report invalid IDs instead of
    # printing nothing.
    if 0 <= opt < len(student):
        print(f'Grades of {student[opt][0]} are {student[opt][1]}')
    else:
        print('Invalid student ID, try again.')
print('Have a nice day!!!')
|
normal
|
{
"blob_id": "74028a7b317c02c90603ad24c1ddb35a1d5d0e9d",
"index": 8678,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n name = str(input('Name: ')).capitalize().strip()\n grade1 = float(input('Grade 1: '))\n grade2 = float(input('Grade 2: '))\n avgrade = (grade1 + grade2) / 2\n student.append([name, [grade1, grade2], avgrade])\n resp = ' '\n while resp not in 'NnYy':\n resp = str(input('Another student? [Y/N]'))\n if resp == 'N':\n break\nprint('-=' * 15)\nprint(f\"{'No.':<4}{'Name:':<10}{'Average Grade:':>8}\")\nprint('-=' * 15)\nfor i, a in enumerate(student):\n print(f'{i:<4}{a[0]:<8}{a[2]:>8.1f}')\nwhile True:\n print('-=' * 20)\n opt = int(input('Enter the student ID to show the grades: (999 to exit) ')\n )\n if opt == 999:\n print('Exiting...')\n break\n if opt <= len(student) - 1:\n print(f'Grades of {student[opt][0]} are {student[opt][1]}')\nprint('Have a nice day!!!')\n",
"step-3": "student = []\nwhile True:\n name = str(input('Name: ')).capitalize().strip()\n grade1 = float(input('Grade 1: '))\n grade2 = float(input('Grade 2: '))\n avgrade = (grade1 + grade2) / 2\n student.append([name, [grade1, grade2], avgrade])\n resp = ' '\n while resp not in 'NnYy':\n resp = str(input('Another student? [Y/N]'))\n if resp == 'N':\n break\nprint('-=' * 15)\nprint(f\"{'No.':<4}{'Name:':<10}{'Average Grade:':>8}\")\nprint('-=' * 15)\nfor i, a in enumerate(student):\n print(f'{i:<4}{a[0]:<8}{a[2]:>8.1f}')\nwhile True:\n print('-=' * 20)\n opt = int(input('Enter the student ID to show the grades: (999 to exit) ')\n )\n if opt == 999:\n print('Exiting...')\n break\n if opt <= len(student) - 1:\n print(f'Grades of {student[opt][0]} are {student[opt][1]}')\nprint('Have a nice day!!!')\n",
"step-4": "student = []\nwhile True:\n name = str(input('Name: ')).capitalize().strip()\n grade1 = float(input('Grade 1: '))\n grade2 = float(input('Grade 2: '))\n avgrade = (grade1 + grade2) / 2\n student.append([name, [grade1, grade2], avgrade])\n resp = ' '\n while resp not in 'NnYy':\n resp = str(input('Another student? [Y/N]'))\n if resp == 'N':\n break\nprint('-=' * 15)\nprint(f'{\"No.\":<4}{\"Name:\":<10}{\"Average Grade:\":>8}')\nprint('-=' * 15)\nfor i, a in enumerate(student):\n print(f'{i:<4}{a[0]:<8}{a[2]:>8.1f}')\nwhile True:\n print('-=' * 20)\n opt = int(input('Enter the student ID to show the grades: (999 to exit) '))\n if opt == 999:\n print('Exiting...')\n break\n if opt <= len(student) - 1:\n print(f'Grades of {student[opt][0]} are {student[opt][1]}')\nprint('Have a nice day!!!')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def merge_the_tools(string, k):
    """Split *string* into len(string)//k consecutive substrings of
    length k and print each one with duplicate characters removed,
    keeping the first occurrence of each character.

    Fixes from the original: floor division (`/` yields a float on
    Python 3, breaking range()), Python 3 print(), and order-preserving
    de-duplication — set() made the output character order arbitrary.
    """
    num_sub_strings = len(string) // k
    for idx in range(num_sub_strings):
        segment = string[idx * k:(idx + 1) * k]
        # dict.fromkeys de-duplicates while preserving insertion order.
        print("".join(dict.fromkeys(segment)))
|
normal
|
{
"blob_id": "e95bda8be2294c295d89f1c035bc209128fa29c8",
"index": 228,
"step-1": "def merge_the_tools(string, k):\n # your code goes here\n num_sub_strings = len(string)/k\n #print num_sub_strings\n\n for idx in range(num_sub_strings):\n print \"\".join(set(list(string[idx * k : (idx + 1) * k])))\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Formater:
    """Parsing helpers for human-formatted numeric strings such as
    ``"1,234.56"`` (comma thousands separators, dot decimal point)."""

    @staticmethod
    def clean_number(posible_number):
        """Parse *posible_number* into a number, or return ``None``.

        Rules (unchanged from the original behaviour):
          * spaces are ignored;
          * more than one dot -> invalid (``None``);
          * exactly one dot -> commas are valid only *before* the dot;
            commas are stripped and the value parsed as ``float``;
          * no dot -> commas are stripped and the value parsed as ``int``;
          * anything unparseable -> ``None``.

        Fixes over the original: ``@staticmethod`` so instance calls work
        (previously ``self`` would be bound to ``posible_number``), and the
        ``return`` inside ``finally`` — which silently overrode the
        ``else: return None`` branch, leaving it dead — is removed in
        favour of straight-line control flow.
        """
        sanitize_number = posible_number.replace(' ', '')
        number_of_dots = sanitize_number.count('.')

        if number_of_dots > 1:
            return None
        if number_of_dots == 1:
            dot_position = sanitize_number.index('.')
            # A comma *after* the decimal dot makes the value malformed;
            # in that case the commas are kept so float() fails below.
            if ',' not in sanitize_number[dot_position:]:
                sanitize_number = sanitize_number.replace(',', '')
            try:
                return float(sanitize_number)
            except ValueError:
                return None
        # No decimal dot: strip thousands separators, parse as integer.
        sanitize_number = sanitize_number.replace(',', '')
        try:
            return int(sanitize_number)
        except ValueError:
            return None
|
normal
|
{
"blob_id": "02c32cf04529ff8b5edddf4e4117f8c4fdf27da9",
"index": 8612,
"step-1": "<mask token>\n",
"step-2": "class Formater:\n <mask token>\n",
"step-3": "class Formater:\n\n def clean_number(posible_number):\n sanitize_number = posible_number.replace(' ', '')\n number_of_dots = sanitize_number.count('.')\n if number_of_dots > 1:\n return None\n if number_of_dots == 1:\n dot_position = sanitize_number.index('.')\n try:\n sanitize_number.index(',', dot_position)\n except Exception:\n sanitize_number = sanitize_number.replace(',', '')\n else:\n return None\n finally:\n try:\n return float(sanitize_number)\n except Exception:\n return None\n if number_of_dots == 0:\n sanitize_number = sanitize_number.replace(',', '')\n try:\n return int(sanitize_number)\n except Exception:\n return None\n",
"step-4": "class Formater():\n def clean_number (posible_number):\n sanitize_number = posible_number.replace(' ', '')\n number_of_dots = sanitize_number.count('.')\n\n if number_of_dots > 1:\n return None\n if number_of_dots == 1:\n dot_position = sanitize_number.index('.')\n try:\n sanitize_number.index(',', dot_position)\n except Exception:\n sanitize_number = sanitize_number.replace(',', '')\n else:\n return None\n finally:\n try:\n return float(sanitize_number)\n except Exception:\n return None\n if number_of_dots == 0:\n sanitize_number = sanitize_number.replace(',', '')\n try:\n return int(sanitize_number)\n except Exception:\n return None",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-16 12:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the mandatory ``image`` field to the ``Promotion`` model."""

    # Must be applied after the previous auto-generated main-app migration.
    dependencies = [('main', '0036_auto_20180516_1818')]

    # ``default=1`` only back-fills existing rows; ``preserve_default=False``
    # drops that default from the final field definition.
    operations = [
        migrations.AddField(
            model_name='promotion',
            name='image',
            field=models.ImageField(
                default=1,
                upload_to='images/promotion',
                verbose_name='Image 1318x790',
            ),
            preserve_default=False,
        ),
    ]
|
normal
|
{
"blob_id": "a7add26a919a41e52ae41c6b4c4079eadaa8aa1d",
"index": 851,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main', '0036_auto_20180516_1818')]\n operations = [migrations.AddField(model_name='promotion', name='image',\n field=models.ImageField(default=1, upload_to='images/promotion',\n verbose_name='Image 1318x790'), preserve_default=False)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main', '0036_auto_20180516_1818')]\n operations = [migrations.AddField(model_name='promotion', name='image',\n field=models.ImageField(default=1, upload_to='images/promotion',\n verbose_name='Image 1318x790'), preserve_default=False)]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-05-16 12:24\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0036_auto_20180516_1818'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='promotion',\n name='image',\n field=models.ImageField(default=1, upload_to='images/promotion', verbose_name='Image 1318x790'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import ambulance_game as abg
import numpy as np
import sympy as sym
from sympy.abc import a, b, c, d, e, f, g, h, i, j
def get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity):
    """Solve symbolically for the steady-state distribution of the chain.

    Builds the symbolic transition matrix for the given queue parameters,
    replaces the last balance equation with the normalisation constraint
    (row of ones = 1), and solves the augmented system by LU
    decomposition.  Only chains with at most 7 states are supported
    (one symbol per state, ``a`` .. ``g``); larger chains return the
    string ``"Capacity of 6 exceeded"`` instead of a solution.
    """
    transition_matrix = abg.markov.get_symbolic_transition_matrix(
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    num_states = transition_matrix.shape[0]
    if num_states > 7:
        return "Capacity of 6 exceeded"
    # Drop the last (redundant) balance equation, append sum(pi) = 1.
    coefficients = sym.Matrix(
        [transition_matrix.transpose()[:-1, :], sym.ones(1, num_states)]
    )
    rhs = sym.Matrix([sym.zeros(num_states - 1, 1), [1]])
    augmented = coefficients.col_insert(num_states, rhs)
    return sym.solve_linear_system_LU(augmented, [a, b, c, d, e, f, g])
def get_symbolic_state_probabilities_1222():
    """Symbolic steady-state probabilities and recursive ratios for the
    model with 1 server, threshold 2, system capacity 2, buffer capacity 2.

    Returns ``(probabilities, ratios)`` where ``probabilities`` lists the
    factored probability of each state in the order
    (0,0), (0,1), (1,1), (0,2), (1,2), and ``ratios`` is a
    ``(buffer_capacity + 1) x (system_capacity + 1)`` matrix of
    successive-state probability ratios.
    """
    num_of_servers = 1
    threshold = 2
    system_capacity = 2
    buffer_capacity = 2
    sym_pi_1222 = get_symbolic_pi(
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    all_states_1222 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    # One symbol per state, in state order: (0,0), (0,1), (1,1), (0,2), (1,2).
    sym_state_probs_1222 = [
        sym.factor(sym_pi_1222[state_symbol]) for state_symbol in (a, b, c, d, e)
    ]
    assert len(sym_state_probs_1222) == len(all_states_1222)

    sym_state_recursive_ratios_1222 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1222[0, 0] = 1
    # (matrix row, matrix col, numerator state index, denominator state index)
    ratio_entries = (
        (0, 1, 1, 0),
        (0, 2, 2, 1),
        (1, 2, 3, 2),
        (2, 2, 4, 3),
    )
    for row, col, num_idx, den_idx in ratio_entries:
        sym_state_recursive_ratios_1222[row, col] = sym.factor(
            sym_state_probs_1222[num_idx] / sym_state_probs_1222[den_idx]
        )
    return sym_state_probs_1222, sym_state_recursive_ratios_1222
def get_symbolic_state_probabilities_1121():
    """Symbolic steady-state probabilities for the model with 1 server,
    threshold 1, system capacity 2 and buffer capacity 1.

    Solves the chain exactly via ``get_symbolic_pi`` and returns a 4-tuple:
      * factored state probabilities, in state order
        (0,0), (0,1), (1,1), (0,2), (1,2);
      * the recursive ratio matrix (each entry is a state's probability
        divided by its predecessor's, following the arcs in the comments);
      * the same matrix with the (1,2) entry taken relative to the state
        to its left, (1,1), instead of the one above it;
      * the ratio of every state's probability to that of the empty
        state (0,0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 2
    buffer_capacity = 1
    all_states_1121 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_pi_1121 = get_symbolic_pi(
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1121 = [0 for _ in range(len(all_states_1121))]
    sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])  # (0,0)
    sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])  # (0,1)
    sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])  # (1,1)
    sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])  # (0,2)
    sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])  # (1,2)
    sym_state_recursive_ratios_1121 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1121[0, 0] = 1
    sym_state_recursive_ratios_1121[0, 1] = sym.factor(
        sym_state_probs_1121[1] / sym_state_probs_1121[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1121[1, 1] = sym.factor(
        sym_state_probs_1121[2] / sym_state_probs_1121[1]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1121[0, 2] = sym.factor(
        sym_state_probs_1121[3] / sym_state_probs_1121[1]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1121[1, 2] = sym.factor(
        sym_state_probs_1121[4] / sym_state_probs_1121[3]
    )  # (0,2) -> (1,2)
    # "Right" variant: reach (1,2) horizontally from (1,1) rather than
    # vertically from (0,2).
    sym_state_recursive_ratios_right_1121 = sym_state_recursive_ratios_1121.copy()
    sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(
        sym_state_probs_1121[4] / sym_state_probs_1121[2]
    )  # (1,1) -> (1,2)
    # "P0" variant: every ratio is taken with respect to the empty state.
    sym_state_recursive_ratios_P0_1121 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1121[0, 0] = 1
    sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(
        sym_state_probs_1121[1] / sym_state_probs_1121[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(
        sym_state_probs_1121[2] / sym_state_probs_1121[0]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(
        sym_state_probs_1121[3] / sym_state_probs_1121[0]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(
        sym_state_probs_1121[4] / sym_state_probs_1121[0]
    )  # (0,0) -> (1,2)
    return (
        sym_state_probs_1121,
        sym_state_recursive_ratios_1121,
        sym_state_recursive_ratios_right_1121,
        sym_state_recursive_ratios_P0_1121,
    )
def get_symbolic_state_probabilities_1122():
    """Closed-form symbolic steady-state probabilities for the model with
    1 server, threshold 1, system capacity 2 and buffer capacity 2.

    The unnormalised probability of each state is a hand-derived
    polynomial in Lambda, lambda_1, lambda_2 and mu (presumably obtained
    by solving the balance equations offline — TODO confirm against the
    accompanying derivation).  Probabilities are normalised by their sum.
    Returns a 4-tuple: state probabilities in the order
    (0,0), (0,1), (1,1), (2,1), (0,2), (1,2), (2,2); the recursive ratio
    matrix; the "right"-path variant of that matrix; and the ratios of
    every state to the empty state (0,0).
    """
    # num_of_servers = 1
    threshold = 1
    system_capacity = 2
    buffer_capacity = 2
    all_states_1122 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1122 = [0 for _ in range(len(all_states_1122))]
    sym_Lambda = sym.symbols("Lambda")
    sym_lambda_1 = sym.symbols("lambda_1")
    sym_lambda_2 = sym.symbols("lambda_2")
    sym_mu = sym.symbols("mu")
    sym_state_probs_1122[0] = (
        (sym_mu**6)
        + 2 * (sym_lambda_2) * (sym_mu**5)
        + (sym_lambda_2**2) * (sym_mu**4)
    )  # (0,0)
    sym_state_probs_1122[1] = (sym_Lambda * sym_mu**3) * (
        sym_mu**2 + 2 * sym_mu * sym_lambda_2 + sym_lambda_2**2
    )  # (0,1)
    sym_state_probs_1122[2] = (sym_Lambda * sym_lambda_2 * sym_mu**2) * (
        sym_lambda_2**2
        + sym_lambda_2 * sym_lambda_1
        + sym_lambda_1 * sym_mu
        + sym_mu**2
        + 2 * sym_lambda_2 * sym_mu
    )  # (1,1)
    sym_state_probs_1122[3] = (sym_Lambda * sym_lambda_2**2 * sym_mu) * (
        sym_lambda_2**2
        + 2 * sym_lambda_1 * sym_lambda_2
        + 3 * sym_lambda_1 * sym_mu
        + sym_mu**2
        + 2 * sym_lambda_2 * sym_mu
        + sym_lambda_1**2
    )  # (2,1)
    sym_state_probs_1122[4] = (sym_Lambda * sym_lambda_1 * sym_mu**3) * (
        sym_lambda_2 + sym_mu
    )  # (0,2)
    sym_state_probs_1122[5] = (
        sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu**2
    ) * (
        2 * sym_mu + sym_lambda_1 + sym_lambda_2
    )  # (1,2)
    sym_state_probs_1122[6] = (sym_Lambda * sym_lambda_1 * sym_lambda_2**2) * (
        sym_lambda_1**2
        + 4 * sym_lambda_1 * sym_mu
        + 2 * sym_lambda_1 * sym_lambda_2
        + 3 * sym_mu**2
        + sym_lambda_2**2
        + 3 * sym_lambda_2 * sym_mu
    )  # (2,2)
    # Normalise so the probabilities sum to one.
    total_1122 = np.sum(sym_state_probs_1122)
    sym_state_probs_1122 = [i / total_1122 for i in sym_state_probs_1122]
    sym_state_recursive_ratios_1122 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1122[0, 0] = 1
    sym_state_recursive_ratios_1122[0, 1] = sym.factor(
        sym_state_probs_1122[1] / sym_state_probs_1122[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1122[1, 1] = sym.factor(
        sym_state_probs_1122[2] / sym_state_probs_1122[1]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1122[2, 1] = sym.factor(
        sym_state_probs_1122[3] / sym_state_probs_1122[2]
    )  # (1,1) -> (2,1)
    sym_state_recursive_ratios_1122[0, 2] = sym.factor(
        sym_state_probs_1122[4] / sym_state_probs_1122[1]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1122[1, 2] = sym.factor(
        sym_state_probs_1122[5] / sym_state_probs_1122[4]
    )  # (0,2) -> (1,2)
    sym_state_recursive_ratios_1122[2, 2] = sym.factor(
        sym_state_probs_1122[6] / sym_state_probs_1122[5]
    )  # (1,2) -> (2,2)
    # "Right" variant: buffer-column states reached horizontally.
    sym_state_recursive_ratios_right_1122 = sym_state_recursive_ratios_1122.copy()
    sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(
        sym_state_probs_1122[5] / sym_state_probs_1122[2]
    )  # (1,1) -> (1,2)
    sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(
        sym_state_probs_1122[6] / sym_state_probs_1122[3]
    )  # (2,1) -> (2,2)
    # "P0" variant: every ratio is taken with respect to the empty state.
    sym_state_recursive_ratios_P0_1122 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1122[0, 0] = 1
    sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(
        sym_state_probs_1122[1] / sym_state_probs_1122[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(
        sym_state_probs_1122[2] / sym_state_probs_1122[0]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(
        sym_state_probs_1122[3] / sym_state_probs_1122[0]
    )  # (0,0) -> (2,1)
    sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(
        sym_state_probs_1122[4] / sym_state_probs_1122[0]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(
        sym_state_probs_1122[5] / sym_state_probs_1122[0]
    )  # (0,0) -> (1,2)
    sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(
        sym_state_probs_1122[6] / sym_state_probs_1122[0]
    )  # (0,0) -> (2,2)
    return (
        sym_state_probs_1122,
        sym_state_recursive_ratios_1122,
        sym_state_recursive_ratios_right_1122,
        sym_state_recursive_ratios_P0_1122,
    )
def get_symbolic_state_probabilities_1123():
    """Symbolic steady-state probabilities for the model with 1 server,
    threshold 1, system capacity 2 and buffer capacity 3.

    Builds the symbolic transition matrix, replaces the last balance
    equation with the normalisation constraint sum(pi) = 1, and solves
    the resulting system with ``sym.solve``.  Returns a 4-tuple:
      * a dict mapping each probability symbol (p00 ... p32) to its
        symbolic solution;
      * the recursive ratio matrix following the arcs in the comments;
      * the "right"-path variant of that matrix;
      * the ratios of every state to the empty state (0,0).

    Bug fix: the (2,2) -> (3,2) ratio was previously written to index
    ``[2, 2]``, overwriting the (1,2) -> (2,2) entry and leaving
    ``[3, 2]`` zero; it now goes to ``[3, 2]`` as intended.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 2
    buffer_capacity = 3
    Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )
    p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(
        "p00, p01, p11, p21, p31, p02, p12, p22, p32"
    )
    pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])
    dimension_1123 = Q_sym_1123.shape[0]
    # Drop the last balance equation and append the normalisation row.
    M_sym_1123 = sym.Matrix(
        [Q_sym_1123.transpose()[:-1, :], sym.ones(1, dimension_1123)]
    )
    sym_diff_equations_1123 = M_sym_1123 @ pi_1123
    b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])
    # One equation per row (replaces nine hand-written eqN variables).
    equations_1123 = [
        sym.Eq(sym_diff_equations_1123[index], b_sym_1123[index])
        for index in range(dimension_1123)
    ]
    sym_state_probs_1123 = sym.solve(
        equations_1123, (p00, p01, p11, p21, p31, p02, p12, p22, p32)
    )
    sym_state_recursive_ratios_1123 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1123[0, 0] = 1
    sym_state_recursive_ratios_1123[0, 1] = sym.factor(
        sym_state_probs_1123[p01] / sym_state_probs_1123[p00]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1123[1, 1] = sym.factor(
        sym_state_probs_1123[p11] / sym_state_probs_1123[p01]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1123[2, 1] = sym.factor(
        sym_state_probs_1123[p21] / sym_state_probs_1123[p11]
    )  # (1,1) -> (2,1)
    sym_state_recursive_ratios_1123[3, 1] = sym.factor(
        sym_state_probs_1123[p31] / sym_state_probs_1123[p21]
    )  # (2,1) -> (3,1)
    sym_state_recursive_ratios_1123[0, 2] = sym.factor(
        sym_state_probs_1123[p02] / sym_state_probs_1123[p01]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1123[1, 2] = sym.factor(
        sym_state_probs_1123[p12] / sym_state_probs_1123[p02]
    )  # (0,2) -> (1,2)
    sym_state_recursive_ratios_1123[2, 2] = sym.factor(
        sym_state_probs_1123[p22] / sym_state_probs_1123[p12]
    )  # (1,2) -> (2,2)
    # FIXED: previously assigned to [2, 2], clobbering the entry above.
    sym_state_recursive_ratios_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]
    )  # (2,2) -> (3,2)
    # "Right" variant: buffer-column states reached horizontally.
    sym_state_recursive_ratios_right_1123 = sym_state_recursive_ratios_1123.copy()
    sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(
        sym_state_probs_1123[p12] / sym_state_probs_1123[p11]
    )  # (1,1) -> (1,2)
    sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(
        sym_state_probs_1123[p22] / sym_state_probs_1123[p21]
    )  # (2,1) -> (2,2)
    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]
    )  # (2,2) -> (3,2)
    # "P0" variant: every ratio is taken with respect to the empty state.
    sym_state_recursive_ratios_P0_1123 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1123[0, 0] = 1
    sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(
        sym_state_probs_1123[p01] / sym_state_probs_1123[p00]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(
        sym_state_probs_1123[p11] / sym_state_probs_1123[p00]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(
        sym_state_probs_1123[p21] / sym_state_probs_1123[p00]
    )  # (0,0) -> (2,1)
    sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(
        sym_state_probs_1123[p31] / sym_state_probs_1123[p00]
    )  # (0,0) -> (3,1)
    sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(
        sym_state_probs_1123[p02] / sym_state_probs_1123[p00]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(
        sym_state_probs_1123[p12] / sym_state_probs_1123[p00]
    )  # (0,0) -> (1,2)
    sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(
        sym_state_probs_1123[p22] / sym_state_probs_1123[p00]
    )  # (0,0) -> (2,2)
    sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p00]
    )  # (0,0) -> (3,2)
    return (
        sym_state_probs_1123,
        sym_state_recursive_ratios_1123,
        sym_state_recursive_ratios_right_1123,
        sym_state_recursive_ratios_P0_1123,
    )
def get_symbolic_state_probabilities_1341():
    """Closed-form symbolic steady-state probabilities for the model with
    1 server, threshold 3, system capacity 4 and buffer capacity 1.

    Each state's unnormalised probability is a hand-derived polynomial in
    Lambda, lambda_1, lambda_2 and mu, normalised by their sum.  Returns
    a 4-tuple: state probabilities in the order
    (0,0), (0,1), (0,2), (0,3), (1,3), (0,4), (1,4); the recursive ratio
    matrix; the "right"-path variant; and the ratios of every state to
    the empty state (0,0).
    """
    # num_of_servers = 1
    threshold = 3
    system_capacity = 4
    buffer_capacity = 1
    all_states_1341 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1341 = [0 for _ in range(len(all_states_1341))]
    sym_Lambda = sym.symbols("Lambda")
    sym_lambda_1 = sym.symbols("lambda_1")
    sym_lambda_2 = sym.symbols("lambda_2")
    sym_mu = sym.symbols("mu")
    sym_state_probs_1341[0] = (sym_lambda_2) * (sym_mu**5) + (sym_mu**6)  # (0,0)
    sym_state_probs_1341[1] = sym_Lambda * sym_lambda_2 * (sym_mu**4) + sym_Lambda * (
        sym_mu**5
    )  # (0,1)
    sym_state_probs_1341[2] = (sym_Lambda**2) * sym_lambda_2 * (sym_mu**3) + (
        sym_Lambda**2
    ) * (
        sym_mu**4
    )  # (0,2)
    sym_state_probs_1341[3] = (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2) + (
        sym_Lambda**3
    ) * (
        sym_mu**3
    )  # (0,3)
    sym_state_probs_1341[4] = (
        (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu
        + (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2)
        + (sym_Lambda**3) * sym_lambda_2 * sym_lambda_2 * sym_mu
    )  # (1,3)
    sym_state_probs_1341[5] = (sym_Lambda**3) * sym_lambda_1 * (sym_mu**2)  # (0,4)
    sym_state_probs_1341[6] = (
        (sym_Lambda**3) * (sym_lambda_1**2) * sym_lambda_2
        + (sym_Lambda**3) * sym_lambda_1 * (sym_lambda_2**2)
        + 2 * (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu
    )  # (1,4)
    # Normalise so the probabilities sum to one.
    total_1341 = np.sum(sym_state_probs_1341)
    sym_state_probs_1341 = [i / total_1341 for i in sym_state_probs_1341]
    sym_state_recursive_ratios_1341 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1341[0, 0] = 1
    sym_state_recursive_ratios_1341[0, 1] = sym.factor(
        sym_state_probs_1341[1] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1341[0, 2] = sym.factor(
        sym_state_probs_1341[2] / sym_state_probs_1341[1]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1341[0, 3] = sym.factor(
        sym_state_probs_1341[3] / sym_state_probs_1341[2]
    )  # (0,2) -> (0,3)
    sym_state_recursive_ratios_1341[0, 4] = sym.factor(
        sym_state_probs_1341[5] / sym_state_probs_1341[3]
    )  # (0,3) -> (0,4)
    sym_state_recursive_ratios_1341[1, 3] = sym.factor(
        sym_state_probs_1341[4] / sym_state_probs_1341[3]
    )  # (0,3) -> (1,3)
    sym_state_recursive_ratios_1341[1, 4] = sym.factor(
        sym_state_probs_1341[6] / sym_state_probs_1341[5]
    )  # (0,4) -> (1,4)
    # "Right" variant: (1,4) reached horizontally from (1,3).
    sym_state_recursive_ratios_right_1341 = sym_state_recursive_ratios_1341.copy()
    sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(
        sym_state_probs_1341[6] / sym_state_probs_1341[4]
    )  # (1,3) -> (1,4)
    # "P0" variant: every ratio is taken with respect to the empty state.
    sym_state_recursive_ratios_P0_1341 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1341[0, 0] = 1
    sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(
        sym_state_probs_1341[1] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(
        sym_state_probs_1341[2] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(
        sym_state_probs_1341[3] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,3)
    sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(
        sym_state_probs_1341[4] / sym_state_probs_1341[0]
    )  # (0,0) -> (1,3)
    sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(
        sym_state_probs_1341[5] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,4)
    sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(
        sym_state_probs_1341[6] / sym_state_probs_1341[0]
    )  # (0,0) -> (1,4)
    return (
        sym_state_probs_1341,
        sym_state_recursive_ratios_1341,
        sym_state_recursive_ratios_right_1341,
        sym_state_recursive_ratios_P0_1341,
    )
def get_symbolic_state_probabilities_1131():
    """Closed-form symbolic steady-state probabilities for the model with
    1 server, threshold 1, system capacity 3 and buffer capacity 1.

    Each state's unnormalised probability is a hand-derived polynomial in
    Lambda, lambda_1, lambda_2 and mu; here the normalising constant is
    written out explicitly as ``denominator`` rather than summed
    (presumably the pre-expanded sum of all numerators — TODO confirm).
    Returns a 4-tuple: state probabilities in the order
    (0,0), (0,1), (1,1), (0,2), (1,2), (0,3), (1,3); the recursive ratio
    matrix; the "right"-path variant; and the ratios of every state to
    the empty state (0,0).
    """
    # num_of_servers = 1
    threshold = 1
    system_capacity = 3
    buffer_capacity = 1
    all_states_1131 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1131 = [0 for _ in range(len(all_states_1131))]
    sym_Lambda = sym.symbols("Lambda")
    sym_lambda_1 = sym.symbols("lambda_1")
    sym_lambda_2 = sym.symbols("lambda_2")
    sym_mu = sym.symbols("mu")
    # (0,0)
    sym_state_probs_1131[0] = (
        (sym_mu**6)
        + 2 * (sym_lambda_2 * (sym_mu**5))
        + ((sym_lambda_2**2) * (sym_mu**4))
        + (sym_lambda_1 * sym_lambda_2 * (sym_mu**4))
    )
    # (0,1)
    sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu
    # (1,1)
    sym_state_probs_1131[2] = (
        (sym_Lambda * (sym_lambda_1**2) * sym_lambda_2 * (sym_mu**2))
        + (sym_Lambda * sym_lambda_2 * sym_lambda_1 * (sym_mu**3))
        + 2 * (sym_Lambda * sym_lambda_1 * (sym_lambda_2**2) * (sym_mu**2))
        + 2 * (sym_Lambda * (sym_lambda_2**2) * (sym_mu**3))
        + (sym_Lambda * (sym_lambda_2**3) * (sym_mu**2))
        + (sym_Lambda * sym_lambda_2 * (sym_mu**4))
    )
    # (0,2)
    sym_state_probs_1131[3] = (
        sym_Lambda * sym_lambda_1 * sym_mu**3 * (sym_lambda_2 + sym_mu)
    )
    # (1,2)
    sym_state_probs_1131[4] = (sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu) * (
        (sym_lambda_2**2)
        + 2 * sym_lambda_2 * sym_lambda_1
        + 3 * sym_lambda_2 * sym_mu
        + (sym_lambda_1**2)
        + 2 * sym_lambda_1 * sym_mu
        + 2 * (sym_mu**2)
    )
    # (0,3)
    sym_state_probs_1131[5] = sym_Lambda * (sym_lambda_1**2) * (sym_mu**3)
    # (1,3)
    sym_state_probs_1131[6] = (sym_Lambda * sym_lambda_2 * (sym_lambda_1**2)) * (
        (sym_lambda_2**2)
        + 2 * sym_lambda_2 * sym_lambda_1
        + 3 * sym_lambda_2 * sym_mu
        + (sym_lambda_1**2)
        + 2 * sym_lambda_1 * sym_mu
        + 3 * (sym_mu**2)
    )
    # Explicit normalising constant for all of the numerators above.
    denominator = (
        sym_Lambda * sym_lambda_2**3 * sym_lambda_1**2
        + sym_Lambda * sym_lambda_2**3 * sym_lambda_1 * sym_mu
        + sym_Lambda * sym_lambda_2**3 * sym_mu**2
        + 2 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**3
        + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**2 * sym_mu
        + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1 * sym_mu**2
        + 3 * sym_Lambda * sym_lambda_2**2 * sym_mu**3
        + sym_Lambda * sym_lambda_2 * sym_lambda_1**4
        + 3 * sym_Lambda * sym_lambda_2 * sym_lambda_1**3 * sym_mu
        + 6 * sym_Lambda * sym_lambda_2 * sym_lambda_1**2 * sym_mu**2
        + 5 * sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu**3
        + 3 * sym_Lambda * sym_lambda_2 * sym_mu**4
        + sym_Lambda * sym_lambda_1**2 * sym_mu**3
        + sym_Lambda * sym_lambda_1 * sym_mu**4
        + sym_Lambda * sym_mu**5
        + sym_lambda_2**2 * sym_mu**4
        + sym_lambda_2 * sym_lambda_1 * sym_mu**4
        + 2 * sym_lambda_2 * sym_mu**5
        + sym_mu**6
    )
    sym_state_probs_1131 = [i / denominator for i in sym_state_probs_1131]
    sym_state_recursive_ratios_1131 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1131[0, 0] = 1
    sym_state_recursive_ratios_1131[0, 1] = sym.factor(
        sym_state_probs_1131[1] / sym_state_probs_1131[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1131[1, 1] = sym.factor(
        sym_state_probs_1131[2] / sym_state_probs_1131[1]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1131[0, 2] = sym.factor(
        sym_state_probs_1131[3] / sym_state_probs_1131[1]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1131[1, 2] = sym.factor(
        sym_state_probs_1131[4] / sym_state_probs_1131[3]
    )  # (0,2) -> (1,2)
    sym_state_recursive_ratios_1131[0, 3] = sym.factor(
        sym_state_probs_1131[5] / sym_state_probs_1131[3]
    )  # (0,2) -> (0,3)
    sym_state_recursive_ratios_1131[1, 3] = sym.factor(
        sym_state_probs_1131[6] / sym_state_probs_1131[5]
    )  # (0,3) -> (1,3)
    # "Right" variant: buffer-row states reached horizontally.
    sym_state_recursive_ratios_right_1131 = sym_state_recursive_ratios_1131.copy()
    sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(
        sym_state_probs_1131[4] / sym_state_probs_1131[2]
    )  # (1,1) -> (1,2)
    sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(
        sym_state_probs_1131[6] / sym_state_probs_1131[4]
    )  # (1,2) -> (1,3)
    # "P0" variant: every ratio is taken with respect to the empty state.
    sym_state_recursive_ratios_P0_1131 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1131[0, 0] = 1
    sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(
        sym_state_probs_1131[1] / sym_state_probs_1131[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(
        sym_state_probs_1131[2] / sym_state_probs_1131[0]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(
        sym_state_probs_1131[3] / sym_state_probs_1131[0]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(
        sym_state_probs_1131[4] / sym_state_probs_1131[0]
    )  # (0,0) -> (1,2)
    sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(
        sym_state_probs_1131[5] / sym_state_probs_1131[0]
    )  # (0,0) -> (0,3)
    sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(
        sym_state_probs_1131[6] / sym_state_probs_1131[0]
    )  # (0,0) -> (1,3)
    return (
        sym_state_probs_1131,
        sym_state_recursive_ratios_1131,
        sym_state_recursive_ratios_right_1131,
        sym_state_recursive_ratios_P0_1131,
    )
def get_symbolic_state_probabilities_1132():
    """Symbolic steady-state probabilities for the model with 1 server,
    threshold 1, system capacity 3 and buffer capacity 2.

    Builds the symbolic transition matrix, replaces the last balance
    equation with the normalisation constraint sum(pi) = 1, and solves
    the ten balance equations with ``sym.solve``.  Returns a 4-tuple:
      * a dict mapping each probability symbol (p00 ... p23) to its
        symbolic solution;
      * the recursive ratio matrix following the arcs in the comments;
      * the "right"-path variant of that matrix;
      * the ratios of every state to the empty state (0,0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 3
    buffer_capacity = 2
    Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )
    p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(
        "p00, p01, p11, p21, p02, p12, p22, p03, p13, p23"
    )
    pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])
    dimension_1132 = Q_sym_1132.shape[0]
    # Drop the last balance equation and append the normalisation row.
    M_sym_1132 = sym.Matrix(
        [Q_sym_1132.transpose()[:-1, :], sym.ones(1, dimension_1132)]
    )
    sym_diff_equations_1132 = M_sym_1132 @ pi_1132
    b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
    eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])
    eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])
    eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])
    eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])
    eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])
    eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])
    eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])
    eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])
    eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])
    eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])
    sym_state_probs_1132 = sym.solve(
        [
            eq0_1132,
            eq1_1132,
            eq2_1132,
            eq3_1132,
            eq4_1132,
            eq5_1132,
            eq6_1132,
            eq7_1132,
            eq8_1132,
            eq9_1132,
        ],
        (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23),
    )
    sym_state_recursive_ratios_1132 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1132[0, 0] = 1
    sym_state_recursive_ratios_1132[0, 1] = sym.factor(
        sym_state_probs_1132[p01] / sym_state_probs_1132[p00]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1132[1, 1] = sym.factor(
        sym_state_probs_1132[p11] / sym_state_probs_1132[p01]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1132[2, 1] = sym.factor(
        sym_state_probs_1132[p21] / sym_state_probs_1132[p11]
    )  # (1,1) -> (2,1)
    sym_state_recursive_ratios_1132[0, 2] = sym.factor(
        sym_state_probs_1132[p02] / sym_state_probs_1132[p01]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1132[1, 2] = sym.factor(
        sym_state_probs_1132[p12] / sym_state_probs_1132[p02]
    )  # (0,2) -> (1,2)
    sym_state_recursive_ratios_1132[2, 2] = sym.factor(
        sym_state_probs_1132[p22] / sym_state_probs_1132[p12]
    )  # (1,2) -> (2,2)
    sym_state_recursive_ratios_1132[0, 3] = sym.factor(
        sym_state_probs_1132[p03] / sym_state_probs_1132[p02]
    )  # (0,2) -> (0,3)
    sym_state_recursive_ratios_1132[1, 3] = sym.factor(
        sym_state_probs_1132[p13] / sym_state_probs_1132[p03]
    )  # (0,3) -> (1,3)
    sym_state_recursive_ratios_1132[2, 3] = sym.factor(
        sym_state_probs_1132[p23] / sym_state_probs_1132[p13]
    )  # (1,3) -> (2,3)
    # "Right" variant: buffer-row states reached horizontally.
    sym_state_recursive_ratios_right_1132 = sym_state_recursive_ratios_1132.copy()
    sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(
        sym_state_probs_1132[p12] / sym_state_probs_1132[p11]
    )  # (1,1) -> (1,2)
    sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(
        sym_state_probs_1132[p13] / sym_state_probs_1132[p12]
    )  # (1,2) -> (1,3)
    sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(
        sym_state_probs_1132[p22] / sym_state_probs_1132[p21]
    )  # (2,1) -> (2,2)
    sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(
        sym_state_probs_1132[p23] / sym_state_probs_1132[p22]
    )  # (2,2) -> (2,3)
    # "P0" variant: every ratio is taken with respect to the empty state.
    sym_state_recursive_ratios_P0_1132 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1132[0, 0] = 1
    sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(
        sym_state_probs_1132[p01] / sym_state_probs_1132[p00]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(
        sym_state_probs_1132[p11] / sym_state_probs_1132[p00]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(
        sym_state_probs_1132[p21] / sym_state_probs_1132[p00]
    )  # (0,0) -> (2,1)
    sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(
        sym_state_probs_1132[p02] / sym_state_probs_1132[p00]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(
        sym_state_probs_1132[p12] / sym_state_probs_1132[p00]
    )  # (0,0) -> (1,2)
    sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(
        sym_state_probs_1132[p22] / sym_state_probs_1132[p00]
    )  # (0,0) -> (2,2)
    sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(
        sym_state_probs_1132[p03] / sym_state_probs_1132[p00]
    )  # (0,0) -> (0,3)
    sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(
        sym_state_probs_1132[p13] / sym_state_probs_1132[p00]
    )  # (0,0) -> (1,3)
    sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(
        sym_state_probs_1132[p23] / sym_state_probs_1132[p00]
    )  # (0,0) -> (2,3)
    return (
        sym_state_probs_1132,
        sym_state_recursive_ratios_1132,
        sym_state_recursive_ratios_right_1132,
        sym_state_recursive_ratios_P0_1132,
    )
def get_symbolic_state_probabilities_1141():
    """Solve symbolically for the steady-state probabilities of the Markov
    model with num_of_servers=1, threshold=1, system_capacity=4 and
    buffer_capacity=1, and build the three recursive-ratio matrices.

    Returns
    -------
    tuple
        - sym_state_probs_1141: dict mapping each state symbol ``pij`` to
          its symbolic steady-state probability
        - sym_state_recursive_ratios_1141: ratio of each state to its
          predecessor (left neighbour on row 0, the state above otherwise)
        - sym_state_recursive_ratios_right_1141: as above but states on
          rows >= 1 are related to their left neighbour on the same row
        - sym_state_recursive_ratios_P0_1141: ratio of every state to the
          empty state (0, 0)
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 4
    buffer_capacity = 1
    Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the same (column-major) order as the transition matrix:
    # (0, 0) followed, for each column j >= threshold, by rows
    # 0..buffer_capacity.
    all_states = [(0, 0)] + [
        (row, col)
        for col in range(threshold, system_capacity + 1)
        for row in range(buffer_capacity + 1)
    ]
    p = {state: sym.symbols(f"p{state[0]}{state[1]}") for state in all_states}
    pi_1141 = sym.Matrix([p[state] for state in all_states])

    dimension_1141 = Q_sym_1141.shape[0]
    # Drop the last (redundant) balance equation and append the
    # normalisation condition sum(pi) = 1.
    M_sym_1141 = sym.Matrix(
        [Q_sym_1141.transpose()[:-1, :], sym.ones(1, dimension_1141)]
    )
    sym_diff_equations_1141 = M_sym_1141 @ pi_1141
    b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])
    equations = [
        sym.Eq(sym_diff_equations_1141[index], b_sym_1141[index])
        for index in range(dimension_1141)
    ]
    sym_state_probs_1141 = sym.solve(
        equations, tuple(p[state] for state in all_states)
    )

    # Ratio of each state to its predecessor: row 0 uses the state to the
    # left; rows >= 1 use the state directly above.
    sym_state_recursive_ratios_1141 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1141[0, 0] = 1
    for col in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_1141[0, col] = sym.factor(
            sym_state_probs_1141[p[(0, col)]]
            / sym_state_probs_1141[p[(0, col - 1)]]
        )  # (0, col-1) -> (0, col)
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1141[row, col] = sym.factor(
                sym_state_probs_1141[p[(row, col)]]
                / sym_state_probs_1141[p[(row - 1, col)]]
            )  # (row-1, col) -> (row, col)

    # Same as above, except states on rows >= 1 are instead related to
    # their left neighbour on the same row.
    sym_state_recursive_ratios_right_1141 = sym_state_recursive_ratios_1141.copy()
    for row in range(1, buffer_capacity + 1):
        for col in range(threshold + 1, system_capacity + 1):
            sym_state_recursive_ratios_right_1141[row, col] = sym.factor(
                sym_state_probs_1141[p[(row, col)]]
                / sym_state_probs_1141[p[(row, col - 1)]]
            )  # (row, col-1) -> (row, col)

    # Ratio of every state to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1141 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1141[0, 0] = 1
    for row, col in all_states[1:]:
        sym_state_recursive_ratios_P0_1141[row, col] = sym.factor(
            sym_state_probs_1141[p[(row, col)]] / sym_state_probs_1141[p[(0, 0)]]
        )  # (0,0) -> (row, col)

    return (
        sym_state_probs_1141,
        sym_state_recursive_ratios_1141,
        sym_state_recursive_ratios_right_1141,
        sym_state_recursive_ratios_P0_1141,
    )
def get_symbolic_state_probabilities_1142():
    """Solve symbolically for the steady-state probabilities of the Markov
    model with num_of_servers=1, threshold=1, system_capacity=4 and
    buffer_capacity=2, and build the three recursive-ratio matrices.

    Returns
    -------
    tuple
        - sym_state_probs_1142: dict mapping each state symbol ``pij`` to
          its symbolic steady-state probability
        - sym_state_recursive_ratios_1142: ratio of each state to its
          predecessor (left neighbour on row 0, the state above otherwise)
        - sym_state_recursive_ratios_right_1142: as above but states on
          rows >= 1 are related to their left neighbour on the same row
        - sym_state_recursive_ratios_P0_1142: ratio of every state to the
          empty state (0, 0)
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 4
    buffer_capacity = 2
    Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )

    # States in the same (column-major) order as the transition matrix:
    # (0, 0) followed, for each column j >= threshold, by rows
    # 0..buffer_capacity.
    all_states = [(0, 0)] + [
        (row, col)
        for col in range(threshold, system_capacity + 1)
        for row in range(buffer_capacity + 1)
    ]
    p = {state: sym.symbols(f"p{state[0]}{state[1]}") for state in all_states}
    pi_1142 = sym.Matrix([p[state] for state in all_states])

    dimension_1142 = Q_sym_1142.shape[0]
    # Drop the last (redundant) balance equation and append the
    # normalisation condition sum(pi) = 1.
    M_sym_1142 = sym.Matrix(
        [Q_sym_1142.transpose()[:-1, :], sym.ones(1, dimension_1142)]
    )
    sym_diff_equations_1142 = M_sym_1142 @ pi_1142
    b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])
    equations = [
        sym.Eq(sym_diff_equations_1142[index], b_sym_1142[index])
        for index in range(dimension_1142)
    ]
    sym_state_probs_1142 = sym.solve(
        equations, tuple(p[state] for state in all_states)
    )

    # Ratio of each state to its predecessor: row 0 uses the state to the
    # left; rows >= 1 use the state directly above.
    sym_state_recursive_ratios_1142 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1142[0, 0] = 1
    for col in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_1142[0, col] = sym.factor(
            sym_state_probs_1142[p[(0, col)]]
            / sym_state_probs_1142[p[(0, col - 1)]]
        )  # (0, col-1) -> (0, col)
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1142[row, col] = sym.factor(
                sym_state_probs_1142[p[(row, col)]]
                / sym_state_probs_1142[p[(row - 1, col)]]
            )  # (row-1, col) -> (row, col)

    # Same as above, except states on rows >= 1 are instead related to
    # their left neighbour on the same row.
    sym_state_recursive_ratios_right_1142 = sym_state_recursive_ratios_1142.copy()
    for row in range(1, buffer_capacity + 1):
        for col in range(threshold + 1, system_capacity + 1):
            sym_state_recursive_ratios_right_1142[row, col] = sym.factor(
                sym_state_probs_1142[p[(row, col)]]
                / sym_state_probs_1142[p[(row, col - 1)]]
            )  # (row, col-1) -> (row, col)

    # Ratio of every state to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1142 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1142[0, 0] = 1
    for row, col in all_states[1:]:
        sym_state_recursive_ratios_P0_1142[row, col] = sym.factor(
            sym_state_probs_1142[p[(row, col)]] / sym_state_probs_1142[p[(0, 0)]]
        )  # (0,0) -> (row, col)

    return (
        sym_state_probs_1142,
        sym_state_recursive_ratios_1142,
        sym_state_recursive_ratios_right_1142,
        sym_state_recursive_ratios_P0_1142,
    )
def get_symbolic_state_probabilities_1151():
    """Solve symbolically for the steady-state probabilities of the Markov
    model with num_of_servers=1, threshold=1, system_capacity=5 and
    buffer_capacity=1, and build the three recursive-ratio matrices.

    Returns
    -------
    tuple
        - sym_state_probs_1151: dict mapping each state symbol ``pij`` to
          its symbolic steady-state probability
        - sym_state_recursive_ratios_1151: ratio of each state to its
          predecessor (left neighbour on row 0, the state above otherwise)
        - sym_state_recursive_ratios_right_1151: as above but states on
          rows >= 1 are related to their left neighbour on the same row
        - sym_state_recursive_ratios_P0_1151: ratio of every state to the
          empty state (0, 0)
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 5
    buffer_capacity = 1
    Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the same (column-major) order as the transition matrix:
    # (0, 0) followed, for each column j >= threshold, by rows
    # 0..buffer_capacity.
    all_states = [(0, 0)] + [
        (row, col)
        for col in range(threshold, system_capacity + 1)
        for row in range(buffer_capacity + 1)
    ]
    p = {state: sym.symbols(f"p{state[0]}{state[1]}") for state in all_states}
    pi_1151 = sym.Matrix([p[state] for state in all_states])

    dimension_1151 = Q_sym_1151.shape[0]
    # Drop the last (redundant) balance equation and append the
    # normalisation condition sum(pi) = 1.
    M_sym_1151 = sym.Matrix(
        [Q_sym_1151.transpose()[:-1, :], sym.ones(1, dimension_1151)]
    )
    sym_diff_equations_1151 = M_sym_1151 @ pi_1151
    b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
    equations = [
        sym.Eq(sym_diff_equations_1151[index], b_sym_1151[index])
        for index in range(dimension_1151)
    ]
    sym_state_probs_1151 = sym.solve(
        equations, tuple(p[state] for state in all_states)
    )

    # Ratio of each state to its predecessor: row 0 uses the state to the
    # left; rows >= 1 use the state directly above.
    sym_state_recursive_ratios_1151 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1151[0, 0] = 1
    for col in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_1151[0, col] = sym.factor(
            sym_state_probs_1151[p[(0, col)]]
            / sym_state_probs_1151[p[(0, col - 1)]]
        )  # (0, col-1) -> (0, col)
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1151[row, col] = sym.factor(
                sym_state_probs_1151[p[(row, col)]]
                / sym_state_probs_1151[p[(row - 1, col)]]
            )  # (row-1, col) -> (row, col)

    # Same as above, except states on rows >= 1 are instead related to
    # their left neighbour on the same row.
    sym_state_recursive_ratios_right_1151 = sym_state_recursive_ratios_1151.copy()
    for row in range(1, buffer_capacity + 1):
        for col in range(threshold + 1, system_capacity + 1):
            sym_state_recursive_ratios_right_1151[row, col] = sym.factor(
                sym_state_probs_1151[p[(row, col)]]
                / sym_state_probs_1151[p[(row, col - 1)]]
            )  # (row, col-1) -> (row, col)

    # Ratio of every state to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1151 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1151[0, 0] = 1
    for row, col in all_states[1:]:
        sym_state_recursive_ratios_P0_1151[row, col] = sym.factor(
            sym_state_probs_1151[p[(row, col)]] / sym_state_probs_1151[p[(0, 0)]]
        )  # (0,0) -> (row, col)

    return (
        sym_state_probs_1151,
        sym_state_recursive_ratios_1151,
        sym_state_recursive_ratios_right_1151,
        sym_state_recursive_ratios_P0_1151,
    )
def get_symbolic_state_probabilities_1161():
    """Solve symbolically for the steady-state probabilities of the Markov
    model with num_of_servers=1, threshold=1, system_capacity=6 and
    buffer_capacity=1, and build the three recursive-ratio matrices.

    Returns
    -------
    tuple
        - sym_state_probs_1161: dict mapping each state symbol ``pij`` to
          its symbolic steady-state probability
        - sym_state_recursive_ratios_1161: ratio of each state to its
          predecessor (left neighbour on row 0, the state above otherwise)
        - sym_state_recursive_ratios_right_1161: as above but states on
          rows >= 1 are related to their left neighbour on the same row
        - sym_state_recursive_ratios_P0_1161: ratio of every state to the
          empty state (0, 0)
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 6
    buffer_capacity = 1
    Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the same (column-major) order as the transition matrix:
    # (0, 0) followed, for each column j >= threshold, by rows
    # 0..buffer_capacity.
    all_states = [(0, 0)] + [
        (row, col)
        for col in range(threshold, system_capacity + 1)
        for row in range(buffer_capacity + 1)
    ]
    p = {state: sym.symbols(f"p{state[0]}{state[1]}") for state in all_states}
    pi_1161 = sym.Matrix([p[state] for state in all_states])

    dimension_1161 = Q_sym_1161.shape[0]
    # Drop the last (redundant) balance equation and append the
    # normalisation condition sum(pi) = 1.
    M_sym_1161 = sym.Matrix(
        [Q_sym_1161.transpose()[:-1, :], sym.ones(1, dimension_1161)]
    )
    sym_diff_equations_1161 = M_sym_1161 @ pi_1161
    b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
    equations = [
        sym.Eq(sym_diff_equations_1161[index], b_sym_1161[index])
        for index in range(dimension_1161)
    ]
    sym_state_probs_1161 = sym.solve(
        equations, tuple(p[state] for state in all_states)
    )

    # Ratio of each state to its predecessor: row 0 uses the state to the
    # left; rows >= 1 use the state directly above.
    sym_state_recursive_ratios_1161 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1161[0, 0] = 1
    for col in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_1161[0, col] = sym.factor(
            sym_state_probs_1161[p[(0, col)]]
            / sym_state_probs_1161[p[(0, col - 1)]]
        )  # (0, col-1) -> (0, col)
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1161[row, col] = sym.factor(
                sym_state_probs_1161[p[(row, col)]]
                / sym_state_probs_1161[p[(row - 1, col)]]
            )  # (row-1, col) -> (row, col)

    # Same as above, except states on rows >= 1 are instead related to
    # their left neighbour on the same row.
    sym_state_recursive_ratios_right_1161 = sym_state_recursive_ratios_1161.copy()
    for row in range(1, buffer_capacity + 1):
        for col in range(threshold + 1, system_capacity + 1):
            sym_state_recursive_ratios_right_1161[row, col] = sym.factor(
                sym_state_probs_1161[p[(row, col)]]
                / sym_state_probs_1161[p[(row, col - 1)]]
            )  # (row, col-1) -> (row, col)

    # Ratio of every state to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1161 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1161[0, 0] = 1
    for row, col in all_states[1:]:
        sym_state_recursive_ratios_P0_1161[row, col] = sym.factor(
            sym_state_probs_1161[p[(row, col)]] / sym_state_probs_1161[p[(0, 0)]]
        )  # (0,0) -> (row, col)

    return (
        sym_state_probs_1161,
        sym_state_recursive_ratios_1161,
        sym_state_recursive_ratios_right_1161,
        sym_state_recursive_ratios_P0_1161,
    )
def get_symbolic_state_probabilities_1171():
    """Solve symbolically for the steady-state probabilities of the Markov
    model with num_of_servers=1, threshold=1, system_capacity=7 and
    buffer_capacity=1, and build the three recursive-ratio matrices.

    Returns
    -------
    tuple
        - sym_state_probs_1171: dict mapping each state symbol ``pij`` to
          its symbolic steady-state probability
        - sym_state_recursive_ratios_1171: ratio of each state to its
          predecessor (left neighbour on row 0, the state above otherwise)
        - sym_state_recursive_ratios_right_1171: as above but states on
          rows >= 1 are related to their left neighbour on the same row
        - sym_state_recursive_ratios_P0_1171: ratio of every state to the
          empty state (0, 0)
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 7
    buffer_capacity = 1
    Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the same (column-major) order as the transition matrix:
    # (0, 0) followed, for each column j >= threshold, by rows
    # 0..buffer_capacity.
    all_states = [(0, 0)] + [
        (row, col)
        for col in range(threshold, system_capacity + 1)
        for row in range(buffer_capacity + 1)
    ]
    p = {state: sym.symbols(f"p{state[0]}{state[1]}") for state in all_states}
    pi_1171 = sym.Matrix([p[state] for state in all_states])

    dimension_1171 = Q_sym_1171.shape[0]
    # Drop the last (redundant) balance equation and append the
    # normalisation condition sum(pi) = 1.
    M_sym_1171 = sym.Matrix(
        [Q_sym_1171.transpose()[:-1, :], sym.ones(1, dimension_1171)]
    )
    sym_diff_equations_1171 = M_sym_1171 @ pi_1171
    b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
    equations = [
        sym.Eq(sym_diff_equations_1171[index], b_sym_1171[index])
        for index in range(dimension_1171)
    ]
    sym_state_probs_1171 = sym.solve(
        equations, tuple(p[state] for state in all_states)
    )

    # Ratio of each state to its predecessor: row 0 uses the state to the
    # left; rows >= 1 use the state directly above.
    sym_state_recursive_ratios_1171 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1171[0, 0] = 1
    for col in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_1171[0, col] = sym.factor(
            sym_state_probs_1171[p[(0, col)]]
            / sym_state_probs_1171[p[(0, col - 1)]]
        )  # (0, col-1) -> (0, col)
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1171[row, col] = sym.factor(
                sym_state_probs_1171[p[(row, col)]]
                / sym_state_probs_1171[p[(row - 1, col)]]
            )  # (row-1, col) -> (row, col)

    # Same as above, except states on rows >= 1 are instead related to
    # their left neighbour on the same row.
    sym_state_recursive_ratios_right_1171 = sym_state_recursive_ratios_1171.copy()
    for row in range(1, buffer_capacity + 1):
        for col in range(threshold + 1, system_capacity + 1):
            sym_state_recursive_ratios_right_1171[row, col] = sym.factor(
                sym_state_probs_1171[p[(row, col)]]
                / sym_state_probs_1171[p[(row, col - 1)]]
            )  # (row, col-1) -> (row, col)

    # Ratio of every state to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1171 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1171[0, 0] = 1
    for row, col in all_states[1:]:
        sym_state_recursive_ratios_P0_1171[row, col] = sym.factor(
            sym_state_probs_1171[p[(row, col)]] / sym_state_probs_1171[p[(0, 0)]]
        )  # (0,0) -> (row, col)

    return (
        sym_state_probs_1171,
        sym_state_recursive_ratios_1171,
        sym_state_recursive_ratios_right_1171,
        sym_state_recursive_ratios_P0_1171,
    )
def get_symbolic_state_probabilities_1181():
num_of_servers = 1
threshold = 1
system_capacity = 8
buffer_capacity = 1
Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(
num_of_servers, threshold, system_capacity, buffer_capacity
)
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
) = sym.symbols(
"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18"
)
pi_1181 = sym.Matrix(
[
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
]
)
dimension_1181 = Q_sym_1181.shape[0]
M_sym_1181 = sym.Matrix(
[Q_sym_1181.transpose()[:-1, :], sym.ones(1, dimension_1181)]
)
sym_diff_equations_1181 = M_sym_1181 @ pi_1181
b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])
eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])
eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])
eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])
eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])
eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])
eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])
eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])
eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])
eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])
eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])
eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])
eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])
eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])
eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])
eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])
eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])
eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])
sym_state_probs_1181 = sym.solve(
[
eq0_1181,
eq1_1181,
eq2_1181,
eq3_1181,
eq4_1181,
eq5_1181,
eq6_1181,
eq7_1181,
eq8_1181,
eq9_1181,
eq10_1181,
eq11_1181,
eq12_1181,
eq13_1181,
eq14_1181,
eq15_1181,
eq16_1181,
],
(
p00,
p01,
p11,
p02,
p12,
p03,
p13,
p04,
p14,
p05,
p15,
p06,
p16,
p07,
p17,
p08,
p18,
),
)
sym_state_recursive_ratios_1181 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_1181[0, 0] = 1
sym_state_recursive_ratios_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p01]
) # (0,1) -> (1,1)
sym_state_recursive_ratios_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p01]
) # (0,1) -> (0,2)
sym_state_recursive_ratios_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p02]
) # (0,2) -> (1,2)
sym_state_recursive_ratios_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p02]
) # (0,2) -> (0,3)
sym_state_recursive_ratios_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p03]
) # (0,3) -> (1,3)
sym_state_recursive_ratios_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p03]
) # (0,3) -> (0,4)
sym_state_recursive_ratios_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p04]
) # (0,4) -> (1,4)
sym_state_recursive_ratios_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p04]
) # (0,4) -> (0,5)
sym_state_recursive_ratios_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p05]
) # (0,5) -> (1,5)
sym_state_recursive_ratios_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p05]
) # (0,5) -> (0,6)
sym_state_recursive_ratios_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p06]
) # (0,6) -> (1,6)
sym_state_recursive_ratios_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p06]
) # (0,6) -> (0,7)
sym_state_recursive_ratios_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p07]
) # (0,7) -> (1,7)
sym_state_recursive_ratios_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p07]
) # (0,7) -> (0,8)
sym_state_recursive_ratios_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p08]
) # (0,8) -> (1,8)
sym_state_recursive_ratios_right_1181 = sym_state_recursive_ratios_1181.copy()
sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p11]
) # (1,1) -> (1,2)
sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p12]
) # (1,2) -> (1,3)
sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p13]
) # (1,3) -> (1,4)
sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p14]
) # (1,4) -> (1,5)
sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p15]
) # (1,5) -> (1,6)
sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p16]
) # (1,6) -> (1,7)
sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p17]
) # (1,7) -> (1,8)
sym_state_recursive_ratios_P0_1181 = sym.zeros(
buffer_capacity + 1, system_capacity + 1
)
sym_state_recursive_ratios_P0_1181[0, 0] = 1
sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(
sym_state_probs_1181[p01] / sym_state_probs_1181[p00]
) # (0,0) -> (0,1)
sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(
sym_state_probs_1181[p11] / sym_state_probs_1181[p00]
) # (0,0) -> (1,1)
sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(
sym_state_probs_1181[p02] / sym_state_probs_1181[p00]
) # (0,0) -> (0,2)
sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(
sym_state_probs_1181[p12] / sym_state_probs_1181[p00]
) # (0,0) -> (1,2)
sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(
sym_state_probs_1181[p03] / sym_state_probs_1181[p00]
) # (0,0) -> (0,3)
sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(
sym_state_probs_1181[p13] / sym_state_probs_1181[p00]
) # (0,0) -> (1,3)
sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(
sym_state_probs_1181[p04] / sym_state_probs_1181[p00]
) # (0,0) -> (0,4)
sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(
sym_state_probs_1181[p14] / sym_state_probs_1181[p00]
) # (0,0) -> (1,4)
sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(
sym_state_probs_1181[p05] / sym_state_probs_1181[p00]
) # (0,0) -> (0,5)
sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(
sym_state_probs_1181[p15] / sym_state_probs_1181[p00]
) # (0,0) -> (1,5)
sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(
sym_state_probs_1181[p06] / sym_state_probs_1181[p00]
) # (0,0) -> (0,6)
sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(
sym_state_probs_1181[p16] / sym_state_probs_1181[p00]
) # (0,0) -> (1,6)
sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(
sym_state_probs_1181[p07] / sym_state_probs_1181[p00]
) # (0,0) -> (0,7)
sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(
sym_state_probs_1181[p17] / sym_state_probs_1181[p00]
) # (0,0) -> (1,7)
sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(
sym_state_probs_1181[p08] / sym_state_probs_1181[p00]
) # (0,0) -> (0,8)
sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(
sym_state_probs_1181[p18] / sym_state_probs_1181[p00]
) # (0,0) -> (1,8)
return (
sym_state_probs_1181,
sym_state_recursive_ratios_1181,
sym_state_recursive_ratios_right_1181,
sym_state_recursive_ratios_P0_1181,
)
def get_symbolic_state_probabilities_1191():
    """Solve symbolically for the steady-state probabilities of the Markov
    model with 1 server, threshold 1, system capacity 9 and buffer capacity 1.

    The state space is (0,0) followed by the pairs (0,j), (1,j) for
    j = 1, ..., 9.  The probabilities are obtained by replacing the last
    (linearly dependent) balance equation of pi @ Q = 0 with the
    normalising condition sum(pi) = 1 and solving with SymPy.

    Returns
    -------
    tuple
        - dict mapping each state symbol (p00, p01, p11, ..., p09, p19)
          to its symbolic steady-state probability,
        - matrix of ratios between consecutive states along the path
          (0,0) -> (0,1) -> ... -> (0,j) -> (1,j),
        - the same matrix but with the buffer-row ratios taken
          "rightwards", i.e. entry (1, j) is P(1,j) / P(1,j-1),
        - matrix of every state's probability relative to P(0,0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 9
    buffer_capacity = 1

    Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # Symbols p0j for states (0, j) and p1j for states (1, j).  The ordering
    # below must match the state ordering of the transition matrix:
    # (0,0), (0,1), (1,1), (0,2), (1,2), ..., (0,9), (1,9).
    p0 = {j: sym.Symbol(f"p0{j}") for j in range(system_capacity + 1)}
    p1 = {j: sym.Symbol(f"p1{j}") for j in range(threshold, system_capacity + 1)}
    ordered_symbols = [p0[0]]
    for j in range(threshold, system_capacity + 1):
        ordered_symbols.extend((p0[j], p1[j]))
    pi_1191 = sym.Matrix(ordered_symbols)

    # Drop the last balance equation and append the normalising condition.
    dimension_1191 = Q_sym_1191.shape[0]
    M_sym_1191 = sym.Matrix(
        [Q_sym_1191.transpose()[:-1, :], sym.ones(1, dimension_1191)]
    )
    sym_diff_equations_1191 = M_sym_1191 @ pi_1191
    b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])
    equations_1191 = [
        sym.Eq(sym_diff_equations_1191[i], b_sym_1191[i])
        for i in range(dimension_1191)
    ]
    sym_state_probs_1191 = sym.solve(equations_1191, tuple(ordered_symbols))

    # Ratios between consecutive states along the path
    # (0,0) -> (0,1) -> ... -> (0,j) -> (1,j).
    sym_state_recursive_ratios_1191 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1191[0, 0] = 1
    for j in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_1191[0, j] = sym.factor(
            sym_state_probs_1191[p0[j]] / sym_state_probs_1191[p0[j - 1]]
        )  # (0,j-1) -> (0,j)
        sym_state_recursive_ratios_1191[1, j] = sym.factor(
            sym_state_probs_1191[p1[j]] / sym_state_probs_1191[p0[j]]
        )  # (0,j) -> (1,j)

    # Same matrix, but the buffer row is traversed rightwards:
    # (1,1) -> (1,2) -> ... -> (1,9).
    # NOTE: this fixes a bug in the previous version, which assigned the
    # (1,7) -> (1,8) ratio twice and never filled entry [1, 9] with
    # p19 / p18.
    sym_state_recursive_ratios_right_1191 = sym_state_recursive_ratios_1191.copy()
    for j in range(threshold + 1, system_capacity + 1):
        sym_state_recursive_ratios_right_1191[1, j] = sym.factor(
            sym_state_probs_1191[p1[j]] / sym_state_probs_1191[p1[j - 1]]
        )  # (1,j-1) -> (1,j)

    # Every state's probability relative to the empty state (0,0).
    sym_state_recursive_ratios_P0_1191 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1191[0, 0] = 1
    for j in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_P0_1191[0, j] = sym.factor(
            sym_state_probs_1191[p0[j]] / sym_state_probs_1191[p0[0]]
        )  # (0,0) -> (0,j)
        sym_state_recursive_ratios_P0_1191[1, j] = sym.factor(
            sym_state_probs_1191[p1[j]] / sym_state_probs_1191[p0[0]]
        )  # (0,0) -> (1,j)

    return (
        sym_state_probs_1191,
        sym_state_recursive_ratios_1191,
        sym_state_recursive_ratios_right_1191,
        sym_state_recursive_ratios_P0_1191,
    )
|
normal
|
{
"blob_id": "9dd59fee46bd4bec87cc8c40099110b483ad0496",
"index": 6990,
"step-1": "<mask token>\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n all_states_1222 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])\n sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222\n [1] / sym_state_probs_1222[0])\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222\n [2] / sym_state_probs_1222[1])\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222\n [3] / sym_state_probs_1222[2])\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222\n [4] / sym_state_probs_1222[3])\n return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\n<mask token>\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n 'p00, p01, p11, p21, p31, p02, p12, p22, p32')\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,\n dimension_1123)])\n 
sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,\n eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,\n p01, p11, p21, p31, p02, p12, p22, p32))\n sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123\n [p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123\n [p11] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123\n [p21] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123\n [p31] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123\n [p02] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123\n [p12] / sym_state_probs_1123[p02])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p22] / sym_state_probs_1123[p12])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123\n .copy())\n 
sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1123[0, 0] = 1\n sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p00])\n return (sym_state_probs_1123, sym_state_recursive_ratios_1123,\n sym_state_recursive_ratios_right_1123,\n sym_state_recursive_ratios_P0_1123)\n\n\n<mask token>\n\n\ndef get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, 
p13, p04, p14, p05,\n p15])\n dimension_1151 = Q_sym_1151.shape[0]\n M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,\n dimension_1151)])\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,\n eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,\n eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15))\n sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151\n [p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151\n [p11] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151\n [p02] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151\n [p12] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151\n [p03] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151\n [p13] / sym_state_probs_1151[p03])\n 
sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151\n [p04] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151\n [p14] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151\n [p05] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151\n [p15] / sym_state_probs_1151[p05])\n sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151\n .copy())\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11])\n sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12])\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13])\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p14])\n sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n 
sym_state_probs_1151[p14] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00])\n return (sym_state_probs_1151, sym_state_recursive_ratios_1151,\n sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151)\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.\n symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))\n pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16])\n dimension_1161 = Q_sym_1161.shape[0]\n M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,\n dimension_1161)])\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], 
b_sym_1161[12])\n sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,\n eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,\n eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16))\n sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161\n [p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161\n [p11] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161\n [p02] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161\n [p12] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161\n [p03] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161\n [p13] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161\n [p04] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161\n [p14] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161\n [p05] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161\n [p15] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161\n [p06] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161\n [p16] / sym_state_probs_1161[p06])\n sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161\n .copy())\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11])\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n 
sym_state_probs_1161[p13] / sym_state_probs_1161[p12])\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13])\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14])\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15])\n sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00])\n return (sym_state_probs_1161, sym_state_recursive_ratios_1161,\n 
sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161)\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\n ) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'\n ))\n pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17])\n dimension_1171 = Q_sym_1171.shape[0]\n M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,\n dimension_1171)])\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,\n eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,\n eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, 
eq14_1171], (\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,\n p07, p17))\n sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171\n [p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171\n [p11] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171\n [p02] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171\n [p12] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171\n [p03] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171\n [p13] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171\n [p04] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171\n [p14] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171\n [p05] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171\n [p15] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171\n [p06] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171\n [p16] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171\n [p07] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171\n [p17] / sym_state_probs_1171[p07])\n sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171\n .copy())\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11])\n 
sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12])\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13])\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14])\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15])\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16])\n sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n 
sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity\n ):\n Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return 'Capacity of 6 exceeded'\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n all_states_1222 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])\n sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222\n [1] / sym_state_probs_1222[0])\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222\n [2] / sym_state_probs_1222[1])\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222\n [3] / sym_state_probs_1222[2])\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222\n [4] / sym_state_probs_1222[3])\n return sym_state_probs_1222, 
sym_state_recursive_ratios_1222\n\n\n<mask token>\n\n\ndef get_symbolic_state_probabilities_1122():\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n all_states_1122 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +\n sym_lambda_2 ** 2 * sym_mu ** 4)\n sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *\n sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)\n sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (\n sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *\n sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *\n sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu + \n sym_lambda_1 ** 2)\n sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1122[5\n ] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (2 *\n sym_mu + sym_lambda_1 + sym_lambda_2)\n sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (\n sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *\n sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *\n sym_lambda_2 * sym_mu)\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]\n sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122\n [1] / sym_state_probs_1122[0])\n 
sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122\n [2] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122\n [3] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122\n [4] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122\n [5] / sym_state_probs_1122[4])\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122\n [6] / sym_state_probs_1122[5])\n sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122\n .copy())\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[3])\n sym_state_recursive_ratios_P0_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0])\n return (sym_state_probs_1122, sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n sym_state_recursive_ratios_P0_1122)\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n 
threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n 'p00, p01, p11, p21, p31, p02, p12, p22, p32')\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,\n dimension_1123)])\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,\n eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,\n p01, p11, p21, p31, p02, p12, p22, p32))\n sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123\n [p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123\n [p11] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123\n [p21] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123\n [p31] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123\n [p02] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123\n [p12] / 
sym_state_probs_1123[p02])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p22] / sym_state_probs_1123[p12])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123\n .copy())\n sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1123[0, 0] = 1\n sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p00])\n return (sym_state_probs_1123, sym_state_recursive_ratios_1123,\n sym_state_recursive_ratios_right_1123,\n sym_state_recursive_ratios_P0_1123)\n\n\ndef get_symbolic_state_probabilities_1341():\n threshold = 3\n system_capacity = 4\n buffer_capacity = 1\n 
all_states_1341 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6\n sym_state_probs_1341[1\n ] = sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_mu ** 5\n sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +\n sym_Lambda ** 2 * sym_mu ** 4)\n sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +\n sym_Lambda ** 3 * sym_mu ** 3)\n sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *\n sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** \n 2 + sym_Lambda ** 3 * sym_lambda_2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2\n sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *\n sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +\n 2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]\n sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341\n [1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341\n [2] / sym_state_probs_1341[1])\n sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341\n [3] / sym_state_probs_1341[2])\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341\n [5] / sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341\n [4] / sym_state_probs_1341[3])\n 
sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341\n [6] / sym_state_probs_1341[5])\n sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341\n .copy())\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4])\n sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0])\n return (sym_state_probs_1341, sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341)\n\n\ndef get_symbolic_state_probabilities_1131():\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n all_states_1131 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu ** \n 5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *\n sym_mu ** 4)\n sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 
2 *\n sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *\n sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 * \n sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **\n 2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 + \n sym_Lambda * sym_lambda_2 * sym_mu ** 4)\n sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1131[4\n ] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 2 * sym_mu ** 2)\n sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3\n sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 3 * sym_mu ** 2)\n denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 + \n sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *\n sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 ** \n 2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *\n sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *\n sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **\n 2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 + \n sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 + \n sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 * \n sym_mu ** 5 + sym_mu ** 6)\n sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]\n 
sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131\n [1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131\n [2] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131\n [3] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131\n [4] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131\n [5] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131\n [6] / sym_state_probs_1131[5])\n sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131\n .copy())\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2])\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[4])\n sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[0])\n return (sym_state_probs_1131, sym_state_recursive_ratios_1131,\n sym_state_recursive_ratios_right_1131,\n 
sym_state_recursive_ratios_P0_1131)\n\n\ndef get_symbolic_state_probabilities_1132():\n num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 2\n Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(\n 'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')\n pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])\n dimension_1132 = Q_sym_1132.shape[0]\n M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,\n dimension_1132)])\n sym_diff_equations_1132 = M_sym_1132 @ pi_1132\n b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])\n eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])\n eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])\n eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])\n eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])\n eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])\n eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])\n eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])\n eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])\n eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])\n eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])\n sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,\n eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,\n eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))\n sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1132[0, 0] = 1\n sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132\n [p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132\n [p11] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132\n [p21] / 
sym_state_probs_1132[p11])\n sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132\n [p02] / sym_state_probs_1132[p01])\n sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132\n [p12] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132\n [p22] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132\n [p03] / sym_state_probs_1132[p02])\n sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132\n [p13] / sym_state_probs_1132[p03])\n sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132\n [p23] / sym_state_probs_1132[p13])\n sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132\n .copy())\n sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p11])\n sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p12])\n sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p21])\n sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p22])\n sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1132[0, 0] = 1\n sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 
2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p00])\n sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p00])\n return (sym_state_probs_1132, sym_state_recursive_ratios_1132,\n sym_state_recursive_ratios_right_1132,\n sym_state_recursive_ratios_P0_1132)\n\n\n<mask token>\n\n\ndef get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15])\n dimension_1151 = Q_sym_1151.shape[0]\n M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,\n dimension_1151)])\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n 
sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,\n eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,\n eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15))\n sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151\n [p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151\n [p11] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151\n [p02] / sym_state_probs_1151[p01])\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151\n [p12] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151\n [p03] / sym_state_probs_1151[p02])\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151\n [p13] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151\n [p04] / sym_state_probs_1151[p03])\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151\n [p14] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151\n [p05] / sym_state_probs_1151[p04])\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151\n [p15] / sym_state_probs_1151[p05])\n sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151\n .copy())\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11])\n sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12])\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13])\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / 
sym_state_probs_1151[p14])\n sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00])\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00])\n return (sym_state_probs_1151, sym_state_recursive_ratios_1151,\n sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151)\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.\n symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))\n pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16])\n dimension_1161 = Q_sym_1161.shape[0]\n 
M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,\n dimension_1161)])\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])\n sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,\n eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,\n eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16))\n sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161\n [p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161\n [p11] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161\n [p02] / sym_state_probs_1161[p01])\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161\n [p12] / sym_state_probs_1161[p02])\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161\n [p03] / sym_state_probs_1161[p02])\n 
sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161\n [p13] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161\n [p04] / sym_state_probs_1161[p03])\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161\n [p14] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161\n [p05] / sym_state_probs_1161[p04])\n sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161\n [p15] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161\n [p06] / sym_state_probs_1161[p05])\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161\n [p16] / sym_state_probs_1161[p06])\n sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161\n .copy())\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11])\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p12])\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13])\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14])\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15])\n sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n 
sym_state_probs_1161[p12] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00])\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00])\n return (sym_state_probs_1161, sym_state_recursive_ratios_1161,\n sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161)\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\n ) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'\n ))\n pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17])\n dimension_1171 = Q_sym_1171.shape[0]\n M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,\n dimension_1171)])\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = 
sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,\n eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,\n eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,\n p07, p17))\n sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171\n [p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171\n [p11] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171\n [p02] / sym_state_probs_1171[p01])\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171\n [p12] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171\n [p03] / sym_state_probs_1171[p02])\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171\n [p13] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[0, 
4] = sym.factor(sym_state_probs_1171\n [p04] / sym_state_probs_1171[p03])\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171\n [p14] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171\n [p05] / sym_state_probs_1171[p04])\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171\n [p15] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171\n [p06] / sym_state_probs_1171[p05])\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171\n [p16] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171\n [p07] / sym_state_probs_1171[p06])\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171\n [p17] / sym_state_probs_1171[p07])\n sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171\n .copy())\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11])\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12])\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13])\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14])\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15])\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16])\n sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / 
sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 
= sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n 
sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12])\n 
sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n 
sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n 
eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[0, 4] = 
sym.factor(sym_state_probs_1191\n [p04] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n [p14] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n [p05] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n [p15] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n [p06] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n [p16] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n [p07] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n [p17] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n [p08] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n [p18] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191\n [p09] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n [p19] / sym_state_probs_1191[p09])\n sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n .copy())\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p14])\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n 
sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n 
sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191)\n",
"step-3": "<mask token>\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity\n ):\n Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return 'Capacity of 6 exceeded'\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n all_states_1222 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])\n sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222\n [1] / sym_state_probs_1222[0])\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222\n [2] / sym_state_probs_1222[1])\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222\n [3] / sym_state_probs_1222[2])\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(sym_state_probs_1222\n [4] / sym_state_probs_1222[3])\n return sym_state_probs_1222, 
sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 1\n all_states_1121 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_pi_1121 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n sym_state_probs_1121 = [(0) for _ in range(len(all_states_1121))]\n sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])\n sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])\n sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])\n sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])\n sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])\n sym_state_recursive_ratios_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1121[0, 0] = 1\n sym_state_recursive_ratios_1121[0, 1] = sym.factor(sym_state_probs_1121\n [1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_1121[1, 1] = sym.factor(sym_state_probs_1121\n [2] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[0, 2] = sym.factor(sym_state_probs_1121\n [3] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(sym_state_probs_1121\n [4] / sym_state_probs_1121[3])\n sym_state_recursive_ratios_right_1121 = (sym_state_recursive_ratios_1121\n .copy())\n sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2])\n sym_state_recursive_ratios_P0_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n 
sym_state_probs_1121[3] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0])\n return (sym_state_probs_1121, sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121)\n\n\ndef get_symbolic_state_probabilities_1122():\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n all_states_1122 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +\n sym_lambda_2 ** 2 * sym_mu ** 4)\n sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *\n sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)\n sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (\n sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *\n sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *\n sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu + \n sym_lambda_1 ** 2)\n sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1122[5\n ] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (2 *\n sym_mu + sym_lambda_1 + sym_lambda_2)\n sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (\n sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *\n sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *\n sym_lambda_2 * sym_mu)\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]\n 
sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122\n [1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122\n [2] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122\n [3] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122\n [4] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122\n [5] / sym_state_probs_1122[4])\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122\n [6] / sym_state_probs_1122[5])\n sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122\n .copy())\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[3])\n sym_state_recursive_ratios_P0_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0])\n return (sym_state_probs_1122, sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n 
sym_state_recursive_ratios_P0_1122)\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n 'p00, p01, p11, p21, p31, p02, p12, p22, p32')\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,\n dimension_1123)])\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,\n eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,\n p01, p11, p21, p31, p02, p12, p22, p32))\n sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123\n [p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123\n [p11] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123\n [p21] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_1123[3, 1] = 
sym.factor(sym_state_probs_1123\n [p31] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123\n [p02] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123\n [p12] / sym_state_probs_1123[p02])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p22] / sym_state_probs_1123[p12])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123\n .copy())\n sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1123[0, 0] = 1\n sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / 
sym_state_probs_1123[p00])\n return (sym_state_probs_1123, sym_state_recursive_ratios_1123,\n sym_state_recursive_ratios_right_1123,\n sym_state_recursive_ratios_P0_1123)\n\n\ndef get_symbolic_state_probabilities_1341():\n threshold = 3\n system_capacity = 4\n buffer_capacity = 1\n all_states_1341 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6\n sym_state_probs_1341[1\n ] = sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_mu ** 5\n sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +\n sym_Lambda ** 2 * sym_mu ** 4)\n sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +\n sym_Lambda ** 3 * sym_mu ** 3)\n sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *\n sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** \n 2 + sym_Lambda ** 3 * sym_lambda_2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2\n sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *\n sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +\n 2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]\n sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341\n [1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341\n [2] / sym_state_probs_1341[1])\n sym_state_recursive_ratios_1341[0, 3] = 
sym.factor(sym_state_probs_1341\n [3] / sym_state_probs_1341[2])\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341\n [5] / sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341\n [4] / sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341\n [6] / sym_state_probs_1341[5])\n sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341\n .copy())\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4])\n sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0])\n return (sym_state_probs_1341, sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341)\n\n\ndef get_symbolic_state_probabilities_1131():\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n all_states_1131 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n 
sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu **
        5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *
        sym_mu ** 4)
    # p(0,1) is proportional to p(0,0) scaled by Lambda/mu.
    sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu
    sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *
        sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *
        sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 *
        sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **
        2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 +
        sym_Lambda * sym_lambda_2 * sym_mu ** 4)
    sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (
        sym_lambda_2 + sym_mu)
    sym_state_probs_1131[4] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (
        sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *
        sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *
        sym_mu + 2 * sym_mu ** 2)
    sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3
    sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (
        sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *
        sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *
        sym_mu + 3 * sym_mu ** 2)
    # Explicitly expanded normalising constant (sum of the unnormalised
    # probabilities above).
    denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 +
        sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *
        sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 **
        2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 *
        sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *
        sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 *
        sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *
        sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *
        sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *
        sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *
        sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_lambda_1 **
        2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 +
        sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 +
        sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 *
        sym_mu ** 5 + sym_mu ** 6)
    sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]
    # Ratio of each state to its predecessor in the grid walk.
    sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1131[0, 0] = 1
    sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131
        [1] / sym_state_probs_1131[0])
    sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131
        [2] / sym_state_probs_1131[1])
    sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131
        [3] / sym_state_probs_1131[1])
    sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131
        [4] / sym_state_probs_1131[3])
    sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131
        [5] / sym_state_probs_1131[3])
    sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131
        [6] / sym_state_probs_1131[5])
    # "Right" variant: buffer states relative to the state on their left.
    sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131
        .copy())
    sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(
        sym_state_probs_1131[4] / sym_state_probs_1131[2])
    sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(
        sym_state_probs_1131[6] / sym_state_probs_1131[4])
    # P0 variant: every state relative to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1131[0, 0] = 1
    sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(
        sym_state_probs_1131[1] / sym_state_probs_1131[0])
    sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(
        sym_state_probs_1131[2] / sym_state_probs_1131[0])
    sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(
        sym_state_probs_1131[3] / sym_state_probs_1131[0])
    sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(
        sym_state_probs_1131[4] / sym_state_probs_1131[0])
sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(
        sym_state_probs_1131[5] / sym_state_probs_1131[0])
    sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(
        sym_state_probs_1131[6] / sym_state_probs_1131[0])
    return (sym_state_probs_1131, sym_state_recursive_ratios_1131,
        sym_state_recursive_ratios_right_1131,
        sym_state_recursive_ratios_P0_1131)


def get_symbolic_state_probabilities_1132():
    """Symbolic state probabilities for the (T=1, C=3, N=2) model.

    Solves the symbolic balance equations (last equation replaced by the
    normalisation constraint) for the ten states p00..p23 and returns a
    tuple (probs, recursive_ratios, recursive_ratios_right,
    recursive_ratios_P0), where probs is the sym.solve dict keyed by the
    p-symbols.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 3
    buffer_capacity = 2
    Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(
        'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')
    pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])
    dimension_1132 = Q_sym_1132.shape[0]
    # Drop the redundant last balance equation; append sum(pi) = 1.
    M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,
        dimension_1132)])
    sym_diff_equations_1132 = M_sym_1132 @ pi_1132
    b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
    eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])
    eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])
    eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])
    eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])
    eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])
    eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])
    eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])
    eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])
    eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])
    eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])
    sym_state_probs_1132 = sym.solve([eq0_1132, eq1_1132, eq2_1132,
        eq3_1132, eq4_1132, eq5_1132, eq6_1132, eq7_1132, eq8_1132,
        eq9_1132], (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23))
    # Ratio of each state to its predecessor in the grid walk.
    sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1132[0, 0] = 1
    sym_state_recursive_ratios_1132[0, 1] = sym.factor(sym_state_probs_1132
        [p01] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_1132[1, 1] = sym.factor(sym_state_probs_1132
        [p11] / sym_state_probs_1132[p01])
    sym_state_recursive_ratios_1132[2, 1] = sym.factor(sym_state_probs_1132
        [p21] / sym_state_probs_1132[p11])
    sym_state_recursive_ratios_1132[0, 2] = sym.factor(sym_state_probs_1132
        [p02] / sym_state_probs_1132[p01])
    sym_state_recursive_ratios_1132[1, 2] = sym.factor(sym_state_probs_1132
        [p12] / sym_state_probs_1132[p02])
    sym_state_recursive_ratios_1132[2, 2] = sym.factor(sym_state_probs_1132
        [p22] / sym_state_probs_1132[p12])
    sym_state_recursive_ratios_1132[0, 3] = sym.factor(sym_state_probs_1132
        [p03] / sym_state_probs_1132[p02])
    sym_state_recursive_ratios_1132[1, 3] = sym.factor(sym_state_probs_1132
        [p13] / sym_state_probs_1132[p03])
    sym_state_recursive_ratios_1132[2, 3] = sym.factor(sym_state_probs_1132
        [p23] / sym_state_probs_1132[p13])
    # "Right" variant: buffer states relative to the state on their left.
    sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132
        .copy())
    sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(
        sym_state_probs_1132[p12] / sym_state_probs_1132[p11])
    sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(
        sym_state_probs_1132[p13] / sym_state_probs_1132[p12])
    sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(
        sym_state_probs_1132[p22] / sym_state_probs_1132[p21])
    sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(
        sym_state_probs_1132[p23] / sym_state_probs_1132[p22])
    # P0 variant: every state relative to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1132[0, 0] = 1
    sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(
        sym_state_probs_1132[p01] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(
        sym_state_probs_1132[p11] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(
        sym_state_probs_1132[p21] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(
        sym_state_probs_1132[p02] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(
        sym_state_probs_1132[p12] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(
        sym_state_probs_1132[p22] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(
        sym_state_probs_1132[p03] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(
        sym_state_probs_1132[p13] / sym_state_probs_1132[p00])
    sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(
        sym_state_probs_1132[p23] / sym_state_probs_1132[p00])
    return (sym_state_probs_1132, sym_state_recursive_ratios_1132,
        sym_state_recursive_ratios_right_1132,
        sym_state_recursive_ratios_P0_1132)


<mask token>


def get_symbolic_state_probabilities_1151():
    """Symbolic state probabilities for the (T=1, C=5, N=1) model.

    Same balance-equation approach as the other solve-based helpers:
    build the symbolic transition matrix, replace the last balance
    equation with the normalisation constraint, and solve for the eleven
    states p00..p15.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 5
    buffer_capacity = 1
    Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')
    pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
        p15])
    dimension_1151 = Q_sym_1151.shape[0]
    # Drop the redundant last balance equation; append sum(pi) = 1.
    M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,
        dimension_1151)])
    sym_diff_equations_1151 = M_sym_1151 @ pi_1151
    b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
    eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])
    eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])
    eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])
    eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])
    eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])
    eq5_1151 = \
sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])
    eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])
    eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])
    eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])
    eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])
    eq10_1151 = sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])
    sym_state_probs_1151 = sym.solve([eq0_1151, eq1_1151, eq2_1151,
        eq3_1151, eq4_1151, eq5_1151, eq6_1151, eq7_1151, eq8_1151,
        eq9_1151, eq10_1151], (p00, p01, p11, p02, p12, p03, p13, p04, p14,
        p05, p15))
    # Ratio of each state to its predecessor in the grid walk.
    sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1151[0, 0] = 1
    sym_state_recursive_ratios_1151[0, 1] = sym.factor(sym_state_probs_1151
        [p01] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_1151[1, 1] = sym.factor(sym_state_probs_1151
        [p11] / sym_state_probs_1151[p01])
    sym_state_recursive_ratios_1151[0, 2] = sym.factor(sym_state_probs_1151
        [p02] / sym_state_probs_1151[p01])
    sym_state_recursive_ratios_1151[1, 2] = sym.factor(sym_state_probs_1151
        [p12] / sym_state_probs_1151[p02])
    sym_state_recursive_ratios_1151[0, 3] = sym.factor(sym_state_probs_1151
        [p03] / sym_state_probs_1151[p02])
    sym_state_recursive_ratios_1151[1, 3] = sym.factor(sym_state_probs_1151
        [p13] / sym_state_probs_1151[p03])
    sym_state_recursive_ratios_1151[0, 4] = sym.factor(sym_state_probs_1151
        [p04] / sym_state_probs_1151[p03])
    sym_state_recursive_ratios_1151[1, 4] = sym.factor(sym_state_probs_1151
        [p14] / sym_state_probs_1151[p04])
    sym_state_recursive_ratios_1151[0, 5] = sym.factor(sym_state_probs_1151
        [p05] / sym_state_probs_1151[p04])
    sym_state_recursive_ratios_1151[1, 5] = sym.factor(sym_state_probs_1151
        [p15] / sym_state_probs_1151[p05])
    # "Right" variant: buffer states relative to the state on their left.
    sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151
        .copy())
    sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(
        sym_state_probs_1151[p12] / sym_state_probs_1151[p11])
    sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(
        sym_state_probs_1151[p13] / sym_state_probs_1151[p12])
    sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(
        sym_state_probs_1151[p14] / sym_state_probs_1151[p13])
    sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(
        sym_state_probs_1151[p15] / sym_state_probs_1151[p14])
    # P0 variant: every state relative to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1151[0, 0] = 1
    sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(
        sym_state_probs_1151[p01] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(
        sym_state_probs_1151[p11] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(
        sym_state_probs_1151[p02] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(
        sym_state_probs_1151[p12] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(
        sym_state_probs_1151[p03] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(
        sym_state_probs_1151[p13] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(
        sym_state_probs_1151[p04] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(
        sym_state_probs_1151[p14] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(
        sym_state_probs_1151[p05] / sym_state_probs_1151[p00])
    sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(
        sym_state_probs_1151[p15] / sym_state_probs_1151[p00])
    return (sym_state_probs_1151, sym_state_recursive_ratios_1151,
        sym_state_recursive_ratios_right_1151,
        sym_state_recursive_ratios_P0_1151)


def get_symbolic_state_probabilities_1161():
    """Symbolic state probabilities for the (T=1, C=6, N=1) model.

    Solve-based variant; see get_symbolic_state_probabilities_1151 for
    the shared approach.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 6
    buffer_capacity = 1
    Q_sym_1161 = \
abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.
        symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))
    pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
        p15, p06, p16])
    dimension_1161 = Q_sym_1161.shape[0]
    # Drop the redundant last balance equation; append sum(pi) = 1.
    M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,
        dimension_1161)])
    sym_diff_equations_1161 = M_sym_1161 @ pi_1161
    b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
    eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])
    eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])
    eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])
    eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])
    eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])
    eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])
    eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])
    eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])
    eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])
    eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])
    eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])
    eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])
    eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])
    sym_state_probs_1161 = sym.solve([eq0_1161, eq1_1161, eq2_1161,
        eq3_1161, eq4_1161, eq5_1161, eq6_1161, eq7_1161, eq8_1161,
        eq9_1161, eq10_1161, eq11_1161, eq12_1161], (p00, p01, p11, p02,
        p12, p03, p13, p04, p14, p05, p15, p06, p16))
    # Ratio of each state to its predecessor in the grid walk.
    sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1161[0, 0] = 1
    sym_state_recursive_ratios_1161[0, 1] = sym.factor(sym_state_probs_1161
        [p01] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_1161[1, 1] = sym.factor(sym_state_probs_1161
        [p11] / sym_state_probs_1161[p01])
    sym_state_recursive_ratios_1161[0, 2] = sym.factor(sym_state_probs_1161
        [p02] / sym_state_probs_1161[p01])
    sym_state_recursive_ratios_1161[1, 2] = sym.factor(sym_state_probs_1161
        [p12] / sym_state_probs_1161[p02])
    sym_state_recursive_ratios_1161[0, 3] = sym.factor(sym_state_probs_1161
        [p03] / sym_state_probs_1161[p02])
    sym_state_recursive_ratios_1161[1, 3] = sym.factor(sym_state_probs_1161
        [p13] / sym_state_probs_1161[p03])
    sym_state_recursive_ratios_1161[0, 4] = sym.factor(sym_state_probs_1161
        [p04] / sym_state_probs_1161[p03])
    sym_state_recursive_ratios_1161[1, 4] = sym.factor(sym_state_probs_1161
        [p14] / sym_state_probs_1161[p04])
    sym_state_recursive_ratios_1161[0, 5] = sym.factor(sym_state_probs_1161
        [p05] / sym_state_probs_1161[p04])
    sym_state_recursive_ratios_1161[1, 5] = sym.factor(sym_state_probs_1161
        [p15] / sym_state_probs_1161[p05])
    sym_state_recursive_ratios_1161[0, 6] = sym.factor(sym_state_probs_1161
        [p06] / sym_state_probs_1161[p05])
    sym_state_recursive_ratios_1161[1, 6] = sym.factor(sym_state_probs_1161
        [p16] / sym_state_probs_1161[p06])
    # "Right" variant: buffer states relative to the state on their left.
    sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161
        .copy())
    sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(
        sym_state_probs_1161[p12] / sym_state_probs_1161[p11])
    sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(
        sym_state_probs_1161[p13] / sym_state_probs_1161[p12])
    sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(
        sym_state_probs_1161[p14] / sym_state_probs_1161[p13])
    sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(
        sym_state_probs_1161[p15] / sym_state_probs_1161[p14])
    sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(
        sym_state_probs_1161[p16] / sym_state_probs_1161[p15])
    # P0 variant: every state relative to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1161[0, 0] = 1
    sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(
        sym_state_probs_1161[p01] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(
        sym_state_probs_1161[p11] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(
        sym_state_probs_1161[p02] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(
        sym_state_probs_1161[p12] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(
        sym_state_probs_1161[p03] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(
        sym_state_probs_1161[p13] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(
        sym_state_probs_1161[p04] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(
        sym_state_probs_1161[p14] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(
        sym_state_probs_1161[p05] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(
        sym_state_probs_1161[p15] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[0, 6] = sym.factor(
        sym_state_probs_1161[p06] / sym_state_probs_1161[p00])
    sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(
        sym_state_probs_1161[p16] / sym_state_probs_1161[p00])
    return (sym_state_probs_1161, sym_state_recursive_ratios_1161,
        sym_state_recursive_ratios_right_1161,
        sym_state_recursive_ratios_P0_1161)


def get_symbolic_state_probabilities_1171():
    """Symbolic state probabilities for the (T=1, C=7, N=1) model.

    Solve-based variant; see get_symbolic_state_probabilities_1151 for
    the shared approach.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 7
    buffer_capacity = 1
    Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17
        ) = (sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'
        ))
    pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
        p15, p06, p16, p07,
p17])
    dimension_1171 = Q_sym_1171.shape[0]
    # Drop the redundant last balance equation; append sum(pi) = 1.
    M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,
        dimension_1171)])
    sym_diff_equations_1171 = M_sym_1171 @ pi_1171
    b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
    eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])
    eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])
    eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])
    eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])
    eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])
    eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])
    eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])
    eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])
    eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])
    eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])
    eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])
    eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])
    eq12_1171 = sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])
    eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])
    eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])
    sym_state_probs_1171 = sym.solve([eq0_1171, eq1_1171, eq2_1171,
        eq3_1171, eq4_1171, eq5_1171, eq6_1171, eq7_1171, eq8_1171,
        eq9_1171, eq10_1171, eq11_1171, eq12_1171, eq13_1171, eq14_1171], (
        p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,
        p07, p17))
    # Ratio of each state to its predecessor in the grid walk.
    sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1171[0, 0] = 1
    sym_state_recursive_ratios_1171[0, 1] = sym.factor(sym_state_probs_1171
        [p01] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_1171[1, 1] = sym.factor(sym_state_probs_1171
        [p11] / sym_state_probs_1171[p01])
    sym_state_recursive_ratios_1171[0, 2] = sym.factor(sym_state_probs_1171
        [p02] / sym_state_probs_1171[p01])
    sym_state_recursive_ratios_1171[1, 2] = sym.factor(sym_state_probs_1171
        [p12] / sym_state_probs_1171[p02])
    sym_state_recursive_ratios_1171[0, 3] = sym.factor(sym_state_probs_1171
        [p03] / sym_state_probs_1171[p02])
    sym_state_recursive_ratios_1171[1, 3] = sym.factor(sym_state_probs_1171
        [p13] / sym_state_probs_1171[p03])
    sym_state_recursive_ratios_1171[0, 4] = sym.factor(sym_state_probs_1171
        [p04] / sym_state_probs_1171[p03])
    sym_state_recursive_ratios_1171[1, 4] = sym.factor(sym_state_probs_1171
        [p14] / sym_state_probs_1171[p04])
    sym_state_recursive_ratios_1171[0, 5] = sym.factor(sym_state_probs_1171
        [p05] / sym_state_probs_1171[p04])
    sym_state_recursive_ratios_1171[1, 5] = sym.factor(sym_state_probs_1171
        [p15] / sym_state_probs_1171[p05])
    sym_state_recursive_ratios_1171[0, 6] = sym.factor(sym_state_probs_1171
        [p06] / sym_state_probs_1171[p05])
    sym_state_recursive_ratios_1171[1, 6] = sym.factor(sym_state_probs_1171
        [p16] / sym_state_probs_1171[p06])
    sym_state_recursive_ratios_1171[0, 7] = sym.factor(sym_state_probs_1171
        [p07] / sym_state_probs_1171[p06])
    sym_state_recursive_ratios_1171[1, 7] = sym.factor(sym_state_probs_1171
        [p17] / sym_state_probs_1171[p07])
    # "Right" variant: buffer states relative to the state on their left.
    sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171
        .copy())
    sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(
        sym_state_probs_1171[p12] / sym_state_probs_1171[p11])
    sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(
        sym_state_probs_1171[p13] / sym_state_probs_1171[p12])
    sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(
        sym_state_probs_1171[p14] / sym_state_probs_1171[p13])
    sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(
        sym_state_probs_1171[p15] / sym_state_probs_1171[p14])
    sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(
        sym_state_probs_1171[p16] / sym_state_probs_1171[p15])
    sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(
        sym_state_probs_1171[p17] / sym_state_probs_1171[p16])
    # P0 variant: every state relative to the empty state (0, 0).
    sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1171[0, 0] = 1
    sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(
        sym_state_probs_1171[p01] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(
        sym_state_probs_1171[p11] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(
        sym_state_probs_1171[p02] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(
        sym_state_probs_1171[p12] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(
        sym_state_probs_1171[p03] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(
        sym_state_probs_1171[p13] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(
        sym_state_probs_1171[p04] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(
        sym_state_probs_1171[p14] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(
        sym_state_probs_1171[p05] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(
        sym_state_probs_1171[p15] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(
        sym_state_probs_1171[p06] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(
        sym_state_probs_1171[p16] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(
        sym_state_probs_1171[p07] / sym_state_probs_1171[p00])
    sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(
        sym_state_probs_1171[p17] / sym_state_probs_1171[p00])
    return (sym_state_probs_1171, sym_state_recursive_ratios_1171,
        sym_state_recursive_ratios_right_1171,
        sym_state_recursive_ratios_P0_1171)


def get_symbolic_state_probabilities_1181():
    """Symbolic state probabilities for the (T=1, C=8, N=1) model.

    Solve-based variant; see get_symbolic_state_probabilities_1151 for
    the shared approach.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 8
buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, 
eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / 
sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12])\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n 
sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = 
sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] 
= sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191\n [p04] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n [p14] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n [p05] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n [p15] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n [p06] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n [p16] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n [p07] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n [p17] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n [p08] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n [p18] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191\n [p09] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n [p19] / sym_state_probs_1191[p09])\n sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n .copy())\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n sym_state_recursive_ratios_right_1191[1, 4] = 
sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p14])\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n 
sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191)\n",
"step-4": "import ambulance_game as abg\nimport numpy as np\nimport sympy as sym\nfrom sympy.abc import a, b, c, d, e, f, g, h, i, j\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity\n ):\n Q_sym = abg.markov.get_symbolic_transition_matrix(num_of_servers=\n num_of_servers, threshold=threshold, system_capacity=\n system_capacity, buffer_capacity=buffer_capacity)\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return 'Capacity of 6 exceeded'\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n sym_pi_1222 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n all_states_1222 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1222 = [(0) for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])\n sym_state_recursive_ratios_1222 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(sym_state_probs_1222\n [1] / sym_state_probs_1222[0])\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(sym_state_probs_1222\n [2] / sym_state_probs_1222[1])\n sym_state_recursive_ratios_1222[1, 2] = sym.factor(sym_state_probs_1222\n [3] / sym_state_probs_1222[2])\n sym_state_recursive_ratios_1222[2, 2] = 
sym.factor(sym_state_probs_1222\n [4] / sym_state_probs_1222[3])\n return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 1\n all_states_1121 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_pi_1121 = get_symbolic_pi(num_of_servers=num_of_servers, threshold=\n threshold, system_capacity=system_capacity, buffer_capacity=\n buffer_capacity)\n sym_state_probs_1121 = [(0) for _ in range(len(all_states_1121))]\n sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])\n sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])\n sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])\n sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])\n sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])\n sym_state_recursive_ratios_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1121[0, 0] = 1\n sym_state_recursive_ratios_1121[0, 1] = sym.factor(sym_state_probs_1121\n [1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_1121[1, 1] = sym.factor(sym_state_probs_1121\n [2] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[0, 2] = sym.factor(sym_state_probs_1121\n [3] / sym_state_probs_1121[1])\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(sym_state_probs_1121\n [4] / sym_state_probs_1121[3])\n sym_state_recursive_ratios_right_1121 = (sym_state_recursive_ratios_1121\n .copy())\n sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2])\n sym_state_recursive_ratios_P0_1121 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / 
sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[0])\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0])\n return (sym_state_probs_1121, sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121)\n\n\ndef get_symbolic_state_probabilities_1122():\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n all_states_1122 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1122 = [(0) for _ in range(len(all_states_1122))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1122[0] = (sym_mu ** 6 + 2 * sym_lambda_2 * sym_mu ** 5 +\n sym_lambda_2 ** 2 * sym_mu ** 4)\n sym_state_probs_1122[1] = sym_Lambda * sym_mu ** 3 * (sym_mu ** 2 + 2 *\n sym_mu * sym_lambda_2 + sym_lambda_2 ** 2)\n sym_state_probs_1122[2] = sym_Lambda * sym_lambda_2 * sym_mu ** 2 * (\n sym_lambda_2 ** 2 + sym_lambda_2 * sym_lambda_1 + sym_lambda_1 *\n sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1122[3] = sym_Lambda * sym_lambda_2 ** 2 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_1 * sym_lambda_2 + 3 *\n sym_lambda_1 * sym_mu + sym_mu ** 2 + 2 * sym_lambda_2 * sym_mu + \n sym_lambda_1 ** 2)\n sym_state_probs_1122[4] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1122[5\n ] = sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu ** 2 * (2 *\n sym_mu + sym_lambda_1 + sym_lambda_2)\n sym_state_probs_1122[6] = sym_Lambda * sym_lambda_1 * sym_lambda_2 ** 2 * (\n sym_lambda_1 ** 2 + 4 * sym_lambda_1 * sym_mu + 2 * sym_lambda_1 *\n sym_lambda_2 + 3 * sym_mu ** 2 + sym_lambda_2 ** 2 + 3 *\n sym_lambda_2 * sym_mu)\n total_1122 = np.sum(sym_state_probs_1122)\n 
sym_state_probs_1122 = [(i / total_1122) for i in sym_state_probs_1122]\n sym_state_recursive_ratios_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(sym_state_probs_1122\n [1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(sym_state_probs_1122\n [2] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(sym_state_probs_1122\n [3] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(sym_state_probs_1122\n [4] / sym_state_probs_1122[1])\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(sym_state_probs_1122\n [5] / sym_state_probs_1122[4])\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(sym_state_probs_1122\n [6] / sym_state_probs_1122[5])\n sym_state_recursive_ratios_right_1122 = (sym_state_recursive_ratios_1122\n .copy())\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2])\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[3])\n sym_state_recursive_ratios_P0_1122 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0])\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0])\n return (sym_state_probs_1122, 
sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n sym_state_recursive_ratios_P0_1122)\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n 'p00, p01, p11, p21, p31, p02, p12, p22, p32')\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n M_sym_1123 = sym.Matrix([Q_sym_1123.transpose()[:-1, :], sym.ones(1,\n dimension_1123)])\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n sym_state_probs_1123 = sym.solve([eq0_1123, eq1_1123, eq2_1123,\n eq3_1123, eq4_1123, eq5_1123, eq6_1123, eq7_1123, eq8_1123], (p00,\n p01, p11, p21, p31, p02, p12, p22, p32))\n sym_state_recursive_ratios_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(sym_state_probs_1123\n [p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_1123[1, 1] = sym.factor(sym_state_probs_1123\n [p11] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[2, 1] = sym.factor(sym_state_probs_1123\n [p21] / 
sym_state_probs_1123[p11])\n sym_state_recursive_ratios_1123[3, 1] = sym.factor(sym_state_probs_1123\n [p31] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_1123[0, 2] = sym.factor(sym_state_probs_1123\n [p02] / sym_state_probs_1123[p01])\n sym_state_recursive_ratios_1123[1, 2] = sym.factor(sym_state_probs_1123\n [p12] / sym_state_probs_1123[p02])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p22] / sym_state_probs_1123[p12])\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(sym_state_probs_1123\n [p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_right_1123 = (sym_state_recursive_ratios_1123\n .copy())\n sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p11])\n sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p21])\n sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22])\n sym_state_recursive_ratios_P0_1123 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1123[0, 0] = 1\n sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p00])\n sym_state_recursive_ratios_P0_1123[3, 
2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p00])\n return (sym_state_probs_1123, sym_state_recursive_ratios_1123,\n sym_state_recursive_ratios_right_1123,\n sym_state_recursive_ratios_P0_1123)\n\n\ndef get_symbolic_state_probabilities_1341():\n threshold = 3\n system_capacity = 4\n buffer_capacity = 1\n all_states_1341 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1341 = [(0) for _ in range(len(all_states_1341))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1341[0] = sym_lambda_2 * sym_mu ** 5 + sym_mu ** 6\n sym_state_probs_1341[1\n ] = sym_Lambda * sym_lambda_2 * sym_mu ** 4 + sym_Lambda * sym_mu ** 5\n sym_state_probs_1341[2] = (sym_Lambda ** 2 * sym_lambda_2 * sym_mu ** 3 +\n sym_Lambda ** 2 * sym_mu ** 4)\n sym_state_probs_1341[3] = (sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** 2 +\n sym_Lambda ** 3 * sym_mu ** 3)\n sym_state_probs_1341[4] = (sym_Lambda ** 3 * sym_lambda_1 *\n sym_lambda_2 * sym_mu + sym_Lambda ** 3 * sym_lambda_2 * sym_mu ** \n 2 + sym_Lambda ** 3 * sym_lambda_2 * sym_lambda_2 * sym_mu)\n sym_state_probs_1341[5] = sym_Lambda ** 3 * sym_lambda_1 * sym_mu ** 2\n sym_state_probs_1341[6] = (sym_Lambda ** 3 * sym_lambda_1 ** 2 *\n sym_lambda_2 + sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 ** 2 +\n 2 * sym_Lambda ** 3 * sym_lambda_1 * sym_lambda_2 * sym_mu)\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [(i / total_1341) for i in sym_state_probs_1341]\n sym_state_recursive_ratios_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(sym_state_probs_1341\n [1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(sym_state_probs_1341\n [2] / sym_state_probs_1341[1])\n 
sym_state_recursive_ratios_1341[0, 3] = sym.factor(sym_state_probs_1341\n [3] / sym_state_probs_1341[2])\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(sym_state_probs_1341\n [5] / sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 3] = sym.factor(sym_state_probs_1341\n [4] / sym_state_probs_1341[3])\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(sym_state_probs_1341\n [6] / sym_state_probs_1341[5])\n sym_state_recursive_ratios_right_1341 = (sym_state_recursive_ratios_1341\n .copy())\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4])\n sym_state_recursive_ratios_P0_1341 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0])\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0])\n return (sym_state_probs_1341, sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341)\n\n\ndef get_symbolic_state_probabilities_1131():\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n all_states_1131 = abg.markov.build_states(threshold=threshold,\n system_capacity=system_capacity, buffer_capacity=buffer_capacity)\n sym_state_probs_1131 = [(0) for _ in range(len(all_states_1131))]\n sym_Lambda = sym.symbols('Lambda')\n sym_lambda_1 = sym.symbols('lambda_1')\n sym_lambda_2 = 
sym.symbols('lambda_2')\n sym_mu = sym.symbols('mu')\n sym_state_probs_1131[0] = (sym_mu ** 6 + 2 * (sym_lambda_2 * sym_mu ** \n 5) + sym_lambda_2 ** 2 * sym_mu ** 4 + sym_lambda_1 * sym_lambda_2 *\n sym_mu ** 4)\n sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n sym_state_probs_1131[2] = (sym_Lambda * sym_lambda_1 ** 2 *\n sym_lambda_2 * sym_mu ** 2 + sym_Lambda * sym_lambda_2 *\n sym_lambda_1 * sym_mu ** 3 + 2 * (sym_Lambda * sym_lambda_1 * \n sym_lambda_2 ** 2 * sym_mu ** 2) + 2 * (sym_Lambda * sym_lambda_2 **\n 2 * sym_mu ** 3) + sym_Lambda * sym_lambda_2 ** 3 * sym_mu ** 2 + \n sym_Lambda * sym_lambda_2 * sym_mu ** 4)\n sym_state_probs_1131[3] = sym_Lambda * sym_lambda_1 * sym_mu ** 3 * (\n sym_lambda_2 + sym_mu)\n sym_state_probs_1131[4\n ] = sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 2 * sym_mu ** 2)\n sym_state_probs_1131[5] = sym_Lambda * sym_lambda_1 ** 2 * sym_mu ** 3\n sym_state_probs_1131[6] = sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * (\n sym_lambda_2 ** 2 + 2 * sym_lambda_2 * sym_lambda_1 + 3 *\n sym_lambda_2 * sym_mu + sym_lambda_1 ** 2 + 2 * sym_lambda_1 *\n sym_mu + 3 * sym_mu ** 2)\n denominator = (sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 ** 2 + \n sym_Lambda * sym_lambda_2 ** 3 * sym_lambda_1 * sym_mu + sym_Lambda *\n sym_lambda_2 ** 3 * sym_mu ** 2 + 2 * sym_Lambda * sym_lambda_2 ** \n 2 * sym_lambda_1 ** 3 + 5 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_lambda_1 ** 2 * sym_mu + 5 * sym_Lambda * sym_lambda_2 ** 2 *\n sym_lambda_1 * sym_mu ** 2 + 3 * sym_Lambda * sym_lambda_2 ** 2 * \n sym_mu ** 3 + sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 4 + 3 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 3 * sym_mu + 6 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 ** 2 * sym_mu ** 2 + 5 *\n sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu ** 3 + 3 *\n sym_Lambda * sym_lambda_2 
* sym_mu ** 4 + sym_Lambda * sym_lambda_1 **\n 2 * sym_mu ** 3 + sym_Lambda * sym_lambda_1 * sym_mu ** 4 + \n sym_Lambda * sym_mu ** 5 + sym_lambda_2 ** 2 * sym_mu ** 4 + \n sym_lambda_2 * sym_lambda_1 * sym_mu ** 4 + 2 * sym_lambda_2 * \n sym_mu ** 5 + sym_mu ** 6)\n sym_state_probs_1131 = [(i / denominator) for i in sym_state_probs_1131]\n sym_state_recursive_ratios_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(sym_state_probs_1131\n [1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(sym_state_probs_1131\n [2] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(sym_state_probs_1131\n [3] / sym_state_probs_1131[1])\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(sym_state_probs_1131\n [4] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(sym_state_probs_1131\n [5] / sym_state_probs_1131[3])\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(sym_state_probs_1131\n [6] / sym_state_probs_1131[5])\n sym_state_recursive_ratios_right_1131 = (sym_state_recursive_ratios_1131\n .copy())\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2])\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[4])\n sym_state_recursive_ratios_P0_1131 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0])\n sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / 
def get_symbolic_state_probabilities_1132():
    """Solve symbolically the Markov chain with one server, threshold 1,
    system capacity 3 and buffer capacity 2.

    Returns a tuple of:
        - the state probabilities (dict mapping each state symbol to its
          symbolic expression),
        - the matrix of recursive ratios of each state over its previous state,
        - the same matrix with the buffer-row ratios taken leftwards instead,
        - the matrix of ratios of every state probability over p(0, 0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 3
    buffer_capacity = 2
    Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(
        'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23')
    pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])
    dimension_1132 = Q_sym_1132.shape[0]
    # Drop the last (redundant) balance equation and append normalisation.
    M_sym_1132 = sym.Matrix([Q_sym_1132.transpose()[:-1, :], sym.ones(1,
        dimension_1132)])
    sym_diff_equations_1132 = M_sym_1132 @ pi_1132
    b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
    # Build every balance equation in one pass instead of eq0 ... eq9 by hand.
    equations = [
        sym.Eq(sym_diff_equations_1132[i], b_sym_1132[i])
        for i in range(dimension_1132)
    ]
    sym_state_probs_1132 = sym.solve(
        equations, (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23)
    )
    # Map each (row, col) state of the chain to its probability symbol.
    state_syms = {
        (0, 0): p00,
        (0, 1): p01, (1, 1): p11, (2, 1): p21,
        (0, 2): p02, (1, 2): p12, (2, 2): p22,
        (0, 3): p03, (1, 3): p13, (2, 3): p23,
    }

    def ratio(state, base_state):
        """Factorised ratio of two state probabilities."""
        return sym.factor(
            sym_state_probs_1132[state_syms[state]]
            / sym_state_probs_1132[state_syms[base_state]]
        )

    sym_state_recursive_ratios_1132 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1132[0, 0] = 1
    for col in range(1, system_capacity + 1):
        # Top row: ratio over the state immediately to the left;
        # buffer rows: ratio over the state immediately above.
        sym_state_recursive_ratios_1132[0, col] = ratio((0, col), (0, col - 1))
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1132[row, col] = ratio(
                (row, col), (row - 1, col)
            )
    sym_state_recursive_ratios_right_1132 = (sym_state_recursive_ratios_1132
        .copy())
    # "Right" direction: buffer-row ratios over the state to the left instead.
    for col in range(threshold + 1, system_capacity + 1):
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_right_1132[row, col] = ratio(
                (row, col), (row, col - 1)
            )
    sym_state_recursive_ratios_P0_1132 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1132[0, 0] = 1
    # Every state probability expressed relative to p(0, 0).
    for state in state_syms:
        if state != (0, 0):
            row, col = state
            sym_state_recursive_ratios_P0_1132[row, col] = ratio(state, (0, 0))
    return (sym_state_probs_1132, sym_state_recursive_ratios_1132,
        sym_state_recursive_ratios_right_1132,
        sym_state_recursive_ratios_P0_1132)
def get_symbolic_state_probabilities_1141():
    """Solve symbolically the Markov chain with one server, threshold 1,
    system capacity 4 and buffer capacity 1.

    Returns a tuple of:
        - the state probabilities (dict mapping each state symbol to its
          symbolic expression),
        - the matrix of recursive ratios of each state over its previous state,
        - the same matrix with the buffer-row ratios taken leftwards instead,
        - the matrix of ratios of every state probability over p(0, 0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 4
    buffer_capacity = 1
    Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14')
    pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])
    dimension_1141 = Q_sym_1141.shape[0]
    # Drop the last (redundant) balance equation and append normalisation.
    M_sym_1141 = sym.Matrix([Q_sym_1141.transpose()[:-1, :], sym.ones(1,
        dimension_1141)])
    sym_diff_equations_1141 = M_sym_1141 @ pi_1141
    b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])
    # Build every balance equation in one pass instead of eq0 ... eq8 by hand.
    equations = [
        sym.Eq(sym_diff_equations_1141[i], b_sym_1141[i])
        for i in range(dimension_1141)
    ]
    sym_state_probs_1141 = sym.solve(
        equations, (p00, p01, p11, p02, p12, p03, p13, p04, p14)
    )
    # Map each (row, col) state of the chain to its probability symbol.
    state_syms = {
        (0, 0): p00,
        (0, 1): p01, (1, 1): p11,
        (0, 2): p02, (1, 2): p12,
        (0, 3): p03, (1, 3): p13,
        (0, 4): p04, (1, 4): p14,
    }

    def ratio(state, base_state):
        """Factorised ratio of two state probabilities."""
        return sym.factor(
            sym_state_probs_1141[state_syms[state]]
            / sym_state_probs_1141[state_syms[base_state]]
        )

    sym_state_recursive_ratios_1141 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1141[0, 0] = 1
    for col in range(1, system_capacity + 1):
        # Top row: ratio over the state immediately to the left;
        # buffer rows: ratio over the state immediately above.
        sym_state_recursive_ratios_1141[0, col] = ratio((0, col), (0, col - 1))
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1141[row, col] = ratio(
                (row, col), (row - 1, col)
            )
    sym_state_recursive_ratios_right_1141 = (sym_state_recursive_ratios_1141
        .copy())
    # "Right" direction: buffer-row ratios over the state to the left instead.
    for col in range(threshold + 1, system_capacity + 1):
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_right_1141[row, col] = ratio(
                (row, col), (row, col - 1)
            )
    sym_state_recursive_ratios_P0_1141 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1141[0, 0] = 1
    # Every state probability expressed relative to p(0, 0).
    for state in state_syms:
        if state != (0, 0):
            row, col = state
            sym_state_recursive_ratios_P0_1141[row, col] = ratio(state, (0, 0))
    return (sym_state_probs_1141, sym_state_recursive_ratios_1141,
        sym_state_recursive_ratios_right_1141,
        sym_state_recursive_ratios_P0_1141)
def get_symbolic_state_probabilities_1142():
    """Solve symbolically the Markov chain with one server, threshold 1,
    system capacity 4 and buffer capacity 2.

    Returns a tuple of:
        - the state probabilities (dict mapping each state symbol to its
          symbolic expression),
        - the matrix of recursive ratios of each state over its previous state,
        - the same matrix with the buffer-row ratios taken leftwards instead,
        - the matrix of ratios of every state probability over p(0, 0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 4
    buffer_capacity = 2
    Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(num_of_servers=
        num_of_servers, threshold=threshold, system_capacity=
        system_capacity, buffer_capacity=buffer_capacity)
    p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = (sym.
        symbols(
        'p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24'))
    pi_1142 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23,
        p04, p14, p24])
    dimension_1142 = Q_sym_1142.shape[0]
    # Drop the last (redundant) balance equation and append normalisation.
    M_sym_1142 = sym.Matrix([Q_sym_1142.transpose()[:-1, :], sym.ones(1,
        dimension_1142)])
    sym_diff_equations_1142 = M_sym_1142 @ pi_1142
    b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])
    # Build every balance equation in one pass instead of eq0 ... eq12 by hand.
    equations = [
        sym.Eq(sym_diff_equations_1142[i], b_sym_1142[i])
        for i in range(dimension_1142)
    ]
    sym_state_probs_1142 = sym.solve(
        equations,
        (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24),
    )
    # Map each (row, col) state of the chain to its probability symbol.
    state_syms = {
        (0, 0): p00,
        (0, 1): p01, (1, 1): p11, (2, 1): p21,
        (0, 2): p02, (1, 2): p12, (2, 2): p22,
        (0, 3): p03, (1, 3): p13, (2, 3): p23,
        (0, 4): p04, (1, 4): p14, (2, 4): p24,
    }

    def ratio(state, base_state):
        """Factorised ratio of two state probabilities."""
        return sym.factor(
            sym_state_probs_1142[state_syms[state]]
            / sym_state_probs_1142[state_syms[base_state]]
        )

    sym_state_recursive_ratios_1142 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1142[0, 0] = 1
    for col in range(1, system_capacity + 1):
        # Top row: ratio over the state immediately to the left;
        # buffer rows: ratio over the state immediately above.
        sym_state_recursive_ratios_1142[0, col] = ratio((0, col), (0, col - 1))
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1142[row, col] = ratio(
                (row, col), (row - 1, col)
            )
    sym_state_recursive_ratios_right_1142 = (sym_state_recursive_ratios_1142
        .copy())
    # "Right" direction: buffer-row ratios over the state to the left instead.
    for col in range(threshold + 1, system_capacity + 1):
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_right_1142[row, col] = ratio(
                (row, col), (row, col - 1)
            )
    sym_state_recursive_ratios_P0_1142 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1142[0, 0] = 1
    # Every state probability expressed relative to p(0, 0).
    for state in state_syms:
        if state != (0, 0):
            row, col = state
            sym_state_recursive_ratios_P0_1142[row, col] = ratio(state, (0, 0))
    return (sym_state_probs_1142, sym_state_recursive_ratios_1142,
        sym_state_recursive_ratios_right_1142,
        sym_state_recursive_ratios_P0_1142)
def get_symbolic_state_probabilities_1151():
    """Solve symbolically the Markov chain with one server, threshold 1,
    system capacity 5 and buffer capacity 1.

    Returns a tuple of:
        - the state probabilities (dict mapping each state symbol to its
          symbolic expression),
        - the matrix of recursive ratios of each state over its previous state,
        - the same matrix with the buffer-row ratios taken leftwards instead,
        - the matrix of ratios of every state probability over p(0, 0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 5
    buffer_capacity = 1
    Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15')
    pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
        p15])
    dimension_1151 = Q_sym_1151.shape[0]
    # Drop the last (redundant) balance equation and append normalisation.
    M_sym_1151 = sym.Matrix([Q_sym_1151.transpose()[:-1, :], sym.ones(1,
        dimension_1151)])
    sym_diff_equations_1151 = M_sym_1151 @ pi_1151
    b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
    # Build every balance equation in one pass instead of eq0 ... eq10 by hand.
    equations = [
        sym.Eq(sym_diff_equations_1151[i], b_sym_1151[i])
        for i in range(dimension_1151)
    ]
    sym_state_probs_1151 = sym.solve(
        equations, (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15)
    )
    # Map each (row, col) state of the chain to its probability symbol.
    state_syms = {
        (0, 0): p00,
        (0, 1): p01, (1, 1): p11,
        (0, 2): p02, (1, 2): p12,
        (0, 3): p03, (1, 3): p13,
        (0, 4): p04, (1, 4): p14,
        (0, 5): p05, (1, 5): p15,
    }

    def ratio(state, base_state):
        """Factorised ratio of two state probabilities."""
        return sym.factor(
            sym_state_probs_1151[state_syms[state]]
            / sym_state_probs_1151[state_syms[base_state]]
        )

    sym_state_recursive_ratios_1151 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1151[0, 0] = 1
    for col in range(1, system_capacity + 1):
        # Top row: ratio over the state immediately to the left;
        # buffer rows: ratio over the state immediately above.
        sym_state_recursive_ratios_1151[0, col] = ratio((0, col), (0, col - 1))
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1151[row, col] = ratio(
                (row, col), (row - 1, col)
            )
    sym_state_recursive_ratios_right_1151 = (sym_state_recursive_ratios_1151
        .copy())
    # "Right" direction: buffer-row ratios over the state to the left instead.
    for col in range(threshold + 1, system_capacity + 1):
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_right_1151[row, col] = ratio(
                (row, col), (row, col - 1)
            )
    sym_state_recursive_ratios_P0_1151 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1151[0, 0] = 1
    # Every state probability expressed relative to p(0, 0).
    for state in state_syms:
        if state != (0, 0):
            row, col = state
            sym_state_recursive_ratios_P0_1151[row, col] = ratio(state, (0, 0))
    return (sym_state_probs_1151, sym_state_recursive_ratios_1151,
        sym_state_recursive_ratios_right_1151,
        sym_state_recursive_ratios_P0_1151)
def get_symbolic_state_probabilities_1161():
    """Solve symbolically the Markov chain with one server, threshold 1,
    system capacity 6 and buffer capacity 1.

    Returns a tuple of:
        - the state probabilities (dict mapping each state symbol to its
          symbolic expression),
        - the matrix of recursive ratios of each state over its previous state,
        - the same matrix with the buffer-row ratios taken leftwards instead,
        - the matrix of ratios of every state probability over p(0, 0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 6
    buffer_capacity = 1
    Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = (sym.
        symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16'))
    pi_1161 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
        p15, p06, p16])
    dimension_1161 = Q_sym_1161.shape[0]
    # Drop the last (redundant) balance equation and append normalisation.
    M_sym_1161 = sym.Matrix([Q_sym_1161.transpose()[:-1, :], sym.ones(1,
        dimension_1161)])
    sym_diff_equations_1161 = M_sym_1161 @ pi_1161
    b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
    # Build every balance equation in one pass instead of eq0 ... eq12 by hand.
    equations = [
        sym.Eq(sym_diff_equations_1161[i], b_sym_1161[i])
        for i in range(dimension_1161)
    ]
    sym_state_probs_1161 = sym.solve(
        equations,
        (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16),
    )
    # Map each (row, col) state of the chain to its probability symbol.
    state_syms = {
        (0, 0): p00,
        (0, 1): p01, (1, 1): p11,
        (0, 2): p02, (1, 2): p12,
        (0, 3): p03, (1, 3): p13,
        (0, 4): p04, (1, 4): p14,
        (0, 5): p05, (1, 5): p15,
        (0, 6): p06, (1, 6): p16,
    }

    def ratio(state, base_state):
        """Factorised ratio of two state probabilities."""
        return sym.factor(
            sym_state_probs_1161[state_syms[state]]
            / sym_state_probs_1161[state_syms[base_state]]
        )

    sym_state_recursive_ratios_1161 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1161[0, 0] = 1
    for col in range(1, system_capacity + 1):
        # Top row: ratio over the state immediately to the left;
        # buffer rows: ratio over the state immediately above.
        sym_state_recursive_ratios_1161[0, col] = ratio((0, col), (0, col - 1))
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1161[row, col] = ratio(
                (row, col), (row - 1, col)
            )
    sym_state_recursive_ratios_right_1161 = (sym_state_recursive_ratios_1161
        .copy())
    # "Right" direction: buffer-row ratios over the state to the left instead.
    for col in range(threshold + 1, system_capacity + 1):
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_right_1161[row, col] = ratio(
                (row, col), (row, col - 1)
            )
    sym_state_recursive_ratios_P0_1161 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1161[0, 0] = 1
    # Every state probability expressed relative to p(0, 0).
    for state in state_syms:
        if state != (0, 0):
            row, col = state
            sym_state_recursive_ratios_P0_1161[row, col] = ratio(state, (0, 0))
    return (sym_state_probs_1161, sym_state_recursive_ratios_1161,
        sym_state_recursive_ratios_right_1161,
        sym_state_recursive_ratios_P0_1161)
def get_symbolic_state_probabilities_1171():
    """Solve symbolically the Markov chain with one server, threshold 1,
    system capacity 7 and buffer capacity 1.

    Returns a tuple of:
        - the state probabilities (dict mapping each state symbol to its
          symbolic expression),
        - the matrix of recursive ratios of each state over its previous state,
        - the same matrix with the buffer-row ratios taken leftwards instead,
        - the matrix of ratios of every state probability over p(0, 0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 7
    buffer_capacity = 1
    Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(num_of_servers,
        threshold, system_capacity, buffer_capacity)
    (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17
        ) = (sym.symbols(
        'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17'
        ))
    pi_1171 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,
        p15, p06, p16, p07, p17])
    dimension_1171 = Q_sym_1171.shape[0]
    # Drop the last (redundant) balance equation and append normalisation.
    M_sym_1171 = sym.Matrix([Q_sym_1171.transpose()[:-1, :], sym.ones(1,
        dimension_1171)])
    sym_diff_equations_1171 = M_sym_1171 @ pi_1171
    b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
    # Build every balance equation in one pass instead of eq0 ... eq14 by hand.
    equations = [
        sym.Eq(sym_diff_equations_1171[i], b_sym_1171[i])
        for i in range(dimension_1171)
    ]
    sym_state_probs_1171 = sym.solve(
        equations,
        (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16,
        p07, p17),
    )
    # Map each (row, col) state of the chain to its probability symbol.
    state_syms = {
        (0, 0): p00,
        (0, 1): p01, (1, 1): p11,
        (0, 2): p02, (1, 2): p12,
        (0, 3): p03, (1, 3): p13,
        (0, 4): p04, (1, 4): p14,
        (0, 5): p05, (1, 5): p15,
        (0, 6): p06, (1, 6): p16,
        (0, 7): p07, (1, 7): p17,
    }

    def ratio(state, base_state):
        """Factorised ratio of two state probabilities."""
        return sym.factor(
            sym_state_probs_1171[state_syms[state]]
            / sym_state_probs_1171[state_syms[base_state]]
        )

    sym_state_recursive_ratios_1171 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_1171[0, 0] = 1
    for col in range(1, system_capacity + 1):
        # Top row: ratio over the state immediately to the left;
        # buffer rows: ratio over the state immediately above.
        sym_state_recursive_ratios_1171[0, col] = ratio((0, col), (0, col - 1))
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1171[row, col] = ratio(
                (row, col), (row - 1, col)
            )
    sym_state_recursive_ratios_right_1171 = (sym_state_recursive_ratios_1171
        .copy())
    # "Right" direction: buffer-row ratios over the state to the left instead.
    for col in range(threshold + 1, system_capacity + 1):
        for row in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_right_1171[row, col] = ratio(
                (row, col), (row, col - 1)
            )
    sym_state_recursive_ratios_P0_1171 = sym.zeros(buffer_capacity + 1,
        system_capacity + 1)
    sym_state_recursive_ratios_P0_1171[0, 0] = 1
    # Every state probability expressed relative to p(0, 0).
    for state in state_syms:
        if state != (0, 0):
            row, col = state
            sym_state_recursive_ratios_P0_1171[row, col] = ratio(state, (0, 0))
    return (sym_state_probs_1171, sym_state_recursive_ratios_1171,
        sym_state_recursive_ratios_right_1171,
        sym_state_recursive_ratios_P0_1171)
sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00])\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00])\n return (sym_state_probs_1171, sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171)\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, 
p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18'\n ))\n pi_1181 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18])\n dimension_1181 = Q_sym_1181.shape[0]\n M_sym_1181 = sym.Matrix([Q_sym_1181.transpose()[:-1, :], sym.ones(1,\n dimension_1181)])\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n sym_state_probs_1181 = sym.solve([eq0_1181, eq1_1181, eq2_1181,\n eq3_1181, eq4_1181, eq5_1181, eq6_1181, eq7_1181, eq8_1181,\n eq9_1181, eq10_1181, eq11_1181, eq12_1181, eq13_1181, eq14_1181,\n eq15_1181, eq16_1181], (p00, p01, p11, p02, p12, p03, p13, p04, p14,\n p05, p15, p06, p16, p07, p17, p08, p18))\n sym_state_recursive_ratios_1181 = 
sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(sym_state_probs_1181\n [p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(sym_state_probs_1181\n [p11] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(sym_state_probs_1181\n [p02] / sym_state_probs_1181[p01])\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(sym_state_probs_1181\n [p12] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(sym_state_probs_1181\n [p03] / sym_state_probs_1181[p02])\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(sym_state_probs_1181\n [p13] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(sym_state_probs_1181\n [p04] / sym_state_probs_1181[p03])\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(sym_state_probs_1181\n [p14] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(sym_state_probs_1181\n [p05] / sym_state_probs_1181[p04])\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(sym_state_probs_1181\n [p15] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(sym_state_probs_1181\n [p06] / sym_state_probs_1181[p05])\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(sym_state_probs_1181\n [p16] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(sym_state_probs_1181\n [p07] / sym_state_probs_1181[p06])\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(sym_state_probs_1181\n [p17] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(sym_state_probs_1181\n [p08] / sym_state_probs_1181[p07])\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(sym_state_probs_1181\n [p18] / sym_state_probs_1181[p08])\n sym_state_recursive_ratios_right_1181 = (sym_state_recursive_ratios_1181\n .copy())\n 
sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11])\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12])\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13])\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14])\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15])\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16])\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17])\n sym_state_recursive_ratios_P0_1181 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00])\n 
sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00])\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00])\n return (sym_state_probs_1181, sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181)\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(num_of_servers,\n threshold, system_capacity, buffer_capacity)\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07,\n p17, p08, p18, p09, p19) = (sym.symbols(\n 'p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19'\n ))\n pi_1191 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05,\n p15, p06, p16, p07, p17, p08, p18, p09, p19])\n dimension_1191 = Q_sym_1191.shape[0]\n M_sym_1191 = sym.Matrix([Q_sym_1191.transpose()[:-1, :], sym.ones(1,\n dimension_1191)])\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = 
sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n sym_state_probs_1191 = sym.solve([eq0_1191, eq1_1191, eq2_1191,\n eq3_1191, eq4_1191, eq5_1191, eq6_1191, eq7_1191, eq8_1191,\n eq9_1191, eq10_1191, eq11_1191, eq12_1191, eq13_1191, eq14_1191,\n eq15_1191, eq16_1191, eq17_1191, eq18_1191], (p00, p01, p11, p02,\n p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18,\n p09, p19))\n sym_state_recursive_ratios_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(sym_state_probs_1191\n [p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(sym_state_probs_1191\n [p11] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(sym_state_probs_1191\n [p02] / sym_state_probs_1191[p01])\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(sym_state_probs_1191\n [p12] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[0, 3] = 
sym.factor(sym_state_probs_1191\n [p03] / sym_state_probs_1191[p02])\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(sym_state_probs_1191\n [p13] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(sym_state_probs_1191\n [p04] / sym_state_probs_1191[p03])\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(sym_state_probs_1191\n [p14] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(sym_state_probs_1191\n [p05] / sym_state_probs_1191[p04])\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(sym_state_probs_1191\n [p15] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(sym_state_probs_1191\n [p06] / sym_state_probs_1191[p05])\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(sym_state_probs_1191\n [p16] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(sym_state_probs_1191\n [p07] / sym_state_probs_1191[p06])\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(sym_state_probs_1191\n [p17] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(sym_state_probs_1191\n [p08] / sym_state_probs_1191[p07])\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(sym_state_probs_1191\n [p18] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(sym_state_probs_1191\n [p09] / sym_state_probs_1191[p08])\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(sym_state_probs_1191\n [p19] / sym_state_probs_1191[p09])\n sym_state_recursive_ratios_right_1191 = (sym_state_recursive_ratios_1191\n .copy())\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11])\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12])\n sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13])\n sym_state_recursive_ratios_right_1191[1, 5] = 
sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p14])\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15])\n sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17])\n sym_state_recursive_ratios_P0_1191 = sym.zeros(buffer_capacity + 1, \n system_capacity + 1)\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n 
sym_state_probs_1191[p16] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00])\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00])\n return (sym_state_probs_1191, sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191)\n",
"step-5": "import ambulance_game as abg\nimport numpy as np\nimport sympy as sym\nfrom sympy.abc import a, b, c, d, e, f, g, h, i, j\n\n\ndef get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity):\n Q_sym = abg.markov.get_symbolic_transition_matrix(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n dimension = Q_sym.shape[0]\n if dimension > 7:\n return \"Capacity of 6 exceeded\"\n M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])\n b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])\n system = M_sym.col_insert(dimension, b_sym)\n sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])\n return sol\n\n\ndef get_symbolic_state_probabilities_1222():\n num_of_servers = 1\n threshold = 2\n system_capacity = 2\n buffer_capacity = 2\n\n sym_pi_1222 = get_symbolic_pi(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n all_states_1222 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n\n sym_state_probs_1222 = [0 for _ in range(len(all_states_1222))]\n sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a]) # (0,0)\n sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b]) # (0,1)\n sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c]) # (1,1)\n sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d]) # (0,2)\n sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e]) # (1,2)\n\n sym_state_recursive_ratios_1222 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1222[0, 0] = 1\n sym_state_recursive_ratios_1222[0, 1] = sym.factor(\n sym_state_probs_1222[1] / sym_state_probs_1222[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1222[0, 2] = sym.factor(\n sym_state_probs_1222[2] / sym_state_probs_1222[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1222[1, 2] 
= sym.factor(\n sym_state_probs_1222[3] / sym_state_probs_1222[2]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1222[2, 2] = sym.factor(\n sym_state_probs_1222[4] / sym_state_probs_1222[3]\n ) # (0,2) -> (1,2)\n\n return sym_state_probs_1222, sym_state_recursive_ratios_1222\n\n\ndef get_symbolic_state_probabilities_1121():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 1\n\n all_states_1121 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_pi_1121 = get_symbolic_pi(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1121 = [0 for _ in range(len(all_states_1121))]\n\n sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a]) # (0,0)\n sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b]) # (0,1)\n sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c]) # (1,1)\n sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d]) # (0,2)\n sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e]) # (1,2)\n\n sym_state_recursive_ratios_1121 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1121[0, 0] = 1\n sym_state_recursive_ratios_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[3]\n ) # (0,2) -> (1,2)\n\n sym_state_recursive_ratios_right_1121 = sym_state_recursive_ratios_1121.copy()\n sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[2]\n ) # (1,1) -> (1,2)\n\n 
sym_state_recursive_ratios_P0_1121 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1121[0, 0] = 1\n sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(\n sym_state_probs_1121[1] / sym_state_probs_1121[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(\n sym_state_probs_1121[2] / sym_state_probs_1121[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(\n sym_state_probs_1121[3] / sym_state_probs_1121[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(\n sym_state_probs_1121[4] / sym_state_probs_1121[0]\n ) # (0,0) -> (1,2)\n\n return (\n sym_state_probs_1121,\n sym_state_recursive_ratios_1121,\n sym_state_recursive_ratios_right_1121,\n sym_state_recursive_ratios_P0_1121,\n )\n\n\ndef get_symbolic_state_probabilities_1122():\n # num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 2\n\n all_states_1122 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1122 = [0 for _ in range(len(all_states_1122))]\n\n sym_Lambda = sym.symbols(\"Lambda\")\n sym_lambda_1 = sym.symbols(\"lambda_1\")\n sym_lambda_2 = sym.symbols(\"lambda_2\")\n sym_mu = sym.symbols(\"mu\")\n\n sym_state_probs_1122[0] = (\n (sym_mu**6)\n + 2 * (sym_lambda_2) * (sym_mu**5)\n + (sym_lambda_2**2) * (sym_mu**4)\n ) # (0,0)\n sym_state_probs_1122[1] = (sym_Lambda * sym_mu**3) * (\n sym_mu**2 + 2 * sym_mu * sym_lambda_2 + sym_lambda_2**2\n ) # (0,1)\n sym_state_probs_1122[2] = (sym_Lambda * sym_lambda_2 * sym_mu**2) * (\n sym_lambda_2**2\n + sym_lambda_2 * sym_lambda_1\n + sym_lambda_1 * sym_mu\n + sym_mu**2\n + 2 * sym_lambda_2 * sym_mu\n ) # (1,1)\n sym_state_probs_1122[3] = (sym_Lambda * sym_lambda_2**2 * sym_mu) * (\n sym_lambda_2**2\n + 2 * sym_lambda_1 * sym_lambda_2\n + 3 * sym_lambda_1 * sym_mu\n + sym_mu**2\n + 2 * sym_lambda_2 * sym_mu\n + 
sym_lambda_1**2\n ) # (2,1)\n sym_state_probs_1122[4] = (sym_Lambda * sym_lambda_1 * sym_mu**3) * (\n sym_lambda_2 + sym_mu\n ) # (0,2)\n sym_state_probs_1122[5] = (\n sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu**2\n ) * (\n 2 * sym_mu + sym_lambda_1 + sym_lambda_2\n ) # (1,2)\n sym_state_probs_1122[6] = (sym_Lambda * sym_lambda_1 * sym_lambda_2**2) * (\n sym_lambda_1**2\n + 4 * sym_lambda_1 * sym_mu\n + 2 * sym_lambda_1 * sym_lambda_2\n + 3 * sym_mu**2\n + sym_lambda_2**2\n + 3 * sym_lambda_2 * sym_mu\n ) # (2,2)\n\n total_1122 = np.sum(sym_state_probs_1122)\n sym_state_probs_1122 = [i / total_1122 for i in sym_state_probs_1122]\n\n sym_state_recursive_ratios_1122 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1122[0, 0] = 1\n sym_state_recursive_ratios_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[2]\n ) # (1,1) -> (2,1)\n\n sym_state_recursive_ratios_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[4]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[5]\n ) # (1,2) -> (2,2)\n\n sym_state_recursive_ratios_right_1122 = sym_state_recursive_ratios_1122.copy()\n sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[2]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[3]\n ) # (2,1) -> (2,2)\n\n sym_state_recursive_ratios_P0_1122 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n 
sym_state_recursive_ratios_P0_1122[0, 0] = 1\n sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(\n sym_state_probs_1122[1] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(\n sym_state_probs_1122[2] / sym_state_probs_1122[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(\n sym_state_probs_1122[3] / sym_state_probs_1122[0]\n ) # (0,0) -> (2,1)\n\n sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(\n sym_state_probs_1122[4] / sym_state_probs_1122[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(\n sym_state_probs_1122[5] / sym_state_probs_1122[0]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(\n sym_state_probs_1122[6] / sym_state_probs_1122[0]\n ) # (0,0) -> (2,2)\n\n return (\n sym_state_probs_1122,\n sym_state_recursive_ratios_1122,\n sym_state_recursive_ratios_right_1122,\n sym_state_recursive_ratios_P0_1122,\n )\n\n\ndef get_symbolic_state_probabilities_1123():\n num_of_servers = 1\n threshold = 1\n system_capacity = 2\n buffer_capacity = 3\n\n Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(\n \"p00, p01, p11, p21, p31, p02, p12, p22, p32\"\n )\n pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])\n dimension_1123 = Q_sym_1123.shape[0]\n\n M_sym_1123 = sym.Matrix(\n [Q_sym_1123.transpose()[:-1, :], sym.ones(1, dimension_1123)]\n )\n sym_diff_equations_1123 = M_sym_1123 @ pi_1123\n\n b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])\n\n eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])\n eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])\n eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])\n eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])\n eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])\n 
eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])\n eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])\n eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])\n eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])\n\n sym_state_probs_1123 = sym.solve(\n [\n eq0_1123,\n eq1_1123,\n eq2_1123,\n eq3_1123,\n eq4_1123,\n eq5_1123,\n eq6_1123,\n eq7_1123,\n eq8_1123,\n ],\n (p00, p01, p11, p21, p31, p02, p12, p22, p32),\n )\n\n sym_state_recursive_ratios_1123 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1123[0, 0] = 1\n sym_state_recursive_ratios_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p11]\n ) # (1,1) -> (2,1)\n sym_state_recursive_ratios_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p21]\n ) # (2,1) -> (3,1)\n sym_state_recursive_ratios_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p12]\n ) # (1,2) -> (2,2)\n sym_state_recursive_ratios_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22]\n ) # (2,2) -> (3,2)\n\n sym_state_recursive_ratios_right_1123 = sym_state_recursive_ratios_1123.copy()\n sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p21]\n ) # (2,1) -> 
(2,2)\n sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p22]\n ) # (2,2) -> (3,2)\n\n sym_state_recursive_ratios_P0_1123 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1123[0, 0] = 1\n sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(\n sym_state_probs_1123[p01] / sym_state_probs_1123[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(\n sym_state_probs_1123[p11] / sym_state_probs_1123[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(\n sym_state_probs_1123[p21] / sym_state_probs_1123[p00]\n ) # (0,0) -> (2,1)\n sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(\n sym_state_probs_1123[p31] / sym_state_probs_1123[p00]\n ) # (0,0) -> (3,1)\n sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(\n sym_state_probs_1123[p02] / sym_state_probs_1123[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(\n sym_state_probs_1123[p12] / sym_state_probs_1123[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(\n sym_state_probs_1123[p22] / sym_state_probs_1123[p00]\n ) # (0,0) -> (2,2)\n sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(\n sym_state_probs_1123[p32] / sym_state_probs_1123[p00]\n ) # (0,0) -> (3,2)\n\n return (\n sym_state_probs_1123,\n sym_state_recursive_ratios_1123,\n sym_state_recursive_ratios_right_1123,\n sym_state_recursive_ratios_P0_1123,\n )\n\n\ndef get_symbolic_state_probabilities_1341():\n # num_of_servers = 1\n threshold = 3\n system_capacity = 4\n buffer_capacity = 1\n\n all_states_1341 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1341 = [0 for _ in range(len(all_states_1341))]\n\n sym_Lambda = sym.symbols(\"Lambda\")\n sym_lambda_1 = sym.symbols(\"lambda_1\")\n sym_lambda_2 = sym.symbols(\"lambda_2\")\n sym_mu = 
sym.symbols(\"mu\")\n\n sym_state_probs_1341[0] = (sym_lambda_2) * (sym_mu**5) + (sym_mu**6) # (0,0)\n sym_state_probs_1341[1] = sym_Lambda * sym_lambda_2 * (sym_mu**4) + sym_Lambda * (\n sym_mu**5\n ) # (0,1)\n sym_state_probs_1341[2] = (sym_Lambda**2) * sym_lambda_2 * (sym_mu**3) + (\n sym_Lambda**2\n ) * (\n sym_mu**4\n ) # (0,2)\n sym_state_probs_1341[3] = (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2) + (\n sym_Lambda**3\n ) * (\n sym_mu**3\n ) # (0,3)\n sym_state_probs_1341[4] = (\n (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu\n + (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2)\n + (sym_Lambda**3) * sym_lambda_2 * sym_lambda_2 * sym_mu\n ) # (1,3)\n sym_state_probs_1341[5] = (sym_Lambda**3) * sym_lambda_1 * (sym_mu**2) # (0,4)\n sym_state_probs_1341[6] = (\n (sym_Lambda**3) * (sym_lambda_1**2) * sym_lambda_2\n + (sym_Lambda**3) * sym_lambda_1 * (sym_lambda_2**2)\n + 2 * (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu\n ) # (1,4)\n\n total_1341 = np.sum(sym_state_probs_1341)\n sym_state_probs_1341 = [i / total_1341 for i in sym_state_probs_1341]\n\n sym_state_recursive_ratios_1341 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1341[0, 0] = 1\n sym_state_recursive_ratios_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[2]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[3]\n ) # (0,3) -> (0,4)\n\n sym_state_recursive_ratios_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[3]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[5]\n ) # (0,4) -> (1,4)\n\n 
sym_state_recursive_ratios_right_1341 = sym_state_recursive_ratios_1341.copy()\n sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[4]\n ) # (1,3) -> (1,4)\n\n sym_state_recursive_ratios_P0_1341 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1341[0, 0] = 1\n sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(\n sym_state_probs_1341[1] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(\n sym_state_probs_1341[2] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(\n sym_state_probs_1341[3] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,3)\n\n sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(\n sym_state_probs_1341[4] / sym_state_probs_1341[0]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(\n sym_state_probs_1341[5] / sym_state_probs_1341[0]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(\n sym_state_probs_1341[6] / sym_state_probs_1341[0]\n ) # (0,0) -> (1,4)\n\n return (\n sym_state_probs_1341,\n sym_state_recursive_ratios_1341,\n sym_state_recursive_ratios_right_1341,\n sym_state_recursive_ratios_P0_1341,\n )\n\n\ndef get_symbolic_state_probabilities_1131():\n # num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 1\n\n all_states_1131 = abg.markov.build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n sym_state_probs_1131 = [0 for _ in range(len(all_states_1131))]\n\n sym_Lambda = sym.symbols(\"Lambda\")\n sym_lambda_1 = sym.symbols(\"lambda_1\")\n sym_lambda_2 = sym.symbols(\"lambda_2\")\n sym_mu = sym.symbols(\"mu\")\n\n # (0,0)\n sym_state_probs_1131[0] = (\n (sym_mu**6)\n + 2 * (sym_lambda_2 * (sym_mu**5))\n + ((sym_lambda_2**2) * (sym_mu**4))\n + (sym_lambda_1 * sym_lambda_2 * (sym_mu**4))\n )\n # (0,1)\n 
sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu\n # (1,1)\n sym_state_probs_1131[2] = (\n (sym_Lambda * (sym_lambda_1**2) * sym_lambda_2 * (sym_mu**2))\n + (sym_Lambda * sym_lambda_2 * sym_lambda_1 * (sym_mu**3))\n + 2 * (sym_Lambda * sym_lambda_1 * (sym_lambda_2**2) * (sym_mu**2))\n + 2 * (sym_Lambda * (sym_lambda_2**2) * (sym_mu**3))\n + (sym_Lambda * (sym_lambda_2**3) * (sym_mu**2))\n + (sym_Lambda * sym_lambda_2 * (sym_mu**4))\n )\n # (0,2)\n sym_state_probs_1131[3] = (\n sym_Lambda * sym_lambda_1 * sym_mu**3 * (sym_lambda_2 + sym_mu)\n )\n # (1,2)\n sym_state_probs_1131[4] = (sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu) * (\n (sym_lambda_2**2)\n + 2 * sym_lambda_2 * sym_lambda_1\n + 3 * sym_lambda_2 * sym_mu\n + (sym_lambda_1**2)\n + 2 * sym_lambda_1 * sym_mu\n + 2 * (sym_mu**2)\n )\n # (0,3)\n sym_state_probs_1131[5] = sym_Lambda * (sym_lambda_1**2) * (sym_mu**3)\n # (1,3)\n sym_state_probs_1131[6] = (sym_Lambda * sym_lambda_2 * (sym_lambda_1**2)) * (\n (sym_lambda_2**2)\n + 2 * sym_lambda_2 * sym_lambda_1\n + 3 * sym_lambda_2 * sym_mu\n + (sym_lambda_1**2)\n + 2 * sym_lambda_1 * sym_mu\n + 3 * (sym_mu**2)\n )\n\n denominator = (\n sym_Lambda * sym_lambda_2**3 * sym_lambda_1**2\n + sym_Lambda * sym_lambda_2**3 * sym_lambda_1 * sym_mu\n + sym_Lambda * sym_lambda_2**3 * sym_mu**2\n + 2 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**3\n + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**2 * sym_mu\n + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1 * sym_mu**2\n + 3 * sym_Lambda * sym_lambda_2**2 * sym_mu**3\n + sym_Lambda * sym_lambda_2 * sym_lambda_1**4\n + 3 * sym_Lambda * sym_lambda_2 * sym_lambda_1**3 * sym_mu\n + 6 * sym_Lambda * sym_lambda_2 * sym_lambda_1**2 * sym_mu**2\n + 5 * sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu**3\n + 3 * sym_Lambda * sym_lambda_2 * sym_mu**4\n + sym_Lambda * sym_lambda_1**2 * sym_mu**3\n + sym_Lambda * sym_lambda_1 * sym_mu**4\n + sym_Lambda * sym_mu**5\n + sym_lambda_2**2 * sym_mu**4\n + 
sym_lambda_2 * sym_lambda_1 * sym_mu**4\n + 2 * sym_lambda_2 * sym_mu**5\n + sym_mu**6\n )\n\n sym_state_probs_1131 = [i / denominator for i in sym_state_probs_1131]\n\n sym_state_recursive_ratios_1131 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1131[0, 0] = 1\n sym_state_recursive_ratios_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[1]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[1]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[3]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[3]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[5]\n ) # (0,3) -> (1,3)\n\n sym_state_recursive_ratios_right_1131 = sym_state_recursive_ratios_1131.copy()\n sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[2]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[4]\n ) # (1,2) -> (1,3)\n\n sym_state_recursive_ratios_P0_1131 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1131[0, 0] = 1\n sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(\n sym_state_probs_1131[1] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(\n sym_state_probs_1131[2] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(\n sym_state_probs_1131[3] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1131[1, 2] = 
sym.factor(\n sym_state_probs_1131[4] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(\n sym_state_probs_1131[5] / sym_state_probs_1131[0]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(\n sym_state_probs_1131[6] / sym_state_probs_1131[0]\n ) # (0,0) -> (1,3)\n\n return (\n sym_state_probs_1131,\n sym_state_recursive_ratios_1131,\n sym_state_recursive_ratios_right_1131,\n sym_state_recursive_ratios_P0_1131,\n )\n\n\ndef get_symbolic_state_probabilities_1132():\n num_of_servers = 1\n threshold = 1\n system_capacity = 3\n buffer_capacity = 2\n\n Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23 = sym.symbols(\n \"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23\"\n )\n pi_1132 = sym.Matrix([p00, p01, p11, p21, p02, p12, p22, p03, p13, p23])\n dimension_1132 = Q_sym_1132.shape[0]\n\n M_sym_1132 = sym.Matrix(\n [Q_sym_1132.transpose()[:-1, :], sym.ones(1, dimension_1132)]\n )\n sym_diff_equations_1132 = M_sym_1132 @ pi_1132\n\n b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])\n\n eq0_1132 = sym.Eq(sym_diff_equations_1132[0], b_sym_1132[0])\n eq1_1132 = sym.Eq(sym_diff_equations_1132[1], b_sym_1132[1])\n eq2_1132 = sym.Eq(sym_diff_equations_1132[2], b_sym_1132[2])\n eq3_1132 = sym.Eq(sym_diff_equations_1132[3], b_sym_1132[3])\n eq4_1132 = sym.Eq(sym_diff_equations_1132[4], b_sym_1132[4])\n eq5_1132 = sym.Eq(sym_diff_equations_1132[5], b_sym_1132[5])\n eq6_1132 = sym.Eq(sym_diff_equations_1132[6], b_sym_1132[6])\n eq7_1132 = sym.Eq(sym_diff_equations_1132[7], b_sym_1132[7])\n eq8_1132 = sym.Eq(sym_diff_equations_1132[8], b_sym_1132[8])\n eq9_1132 = sym.Eq(sym_diff_equations_1132[9], b_sym_1132[9])\n\n sym_state_probs_1132 = sym.solve(\n [\n eq0_1132,\n eq1_1132,\n eq2_1132,\n eq3_1132,\n eq4_1132,\n eq5_1132,\n eq6_1132,\n eq7_1132,\n eq8_1132,\n 
eq9_1132,\n ],\n (p00, p01, p11, p21, p02, p12, p22, p03, p13, p23),\n )\n\n sym_state_recursive_ratios_1132 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1132[0, 0] = 1\n sym_state_recursive_ratios_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p11]\n ) # (1,1) -> (2,1)\n sym_state_recursive_ratios_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p12]\n ) # (1,2) -> (2,2)\n sym_state_recursive_ratios_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p13]\n ) # (1,3) -> (2,3)\n\n sym_state_recursive_ratios_right_1132 = sym_state_recursive_ratios_1132.copy()\n sym_state_recursive_ratios_right_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p21]\n ) # (2,1) -> (2,2)\n sym_state_recursive_ratios_right_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / 
sym_state_probs_1132[p22]\n ) # (2,2) -> (2,3)\n\n sym_state_recursive_ratios_P0_1132 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1132[0, 0] = 1\n sym_state_recursive_ratios_P0_1132[0, 1] = sym.factor(\n sym_state_probs_1132[p01] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1132[1, 1] = sym.factor(\n sym_state_probs_1132[p11] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1132[2, 1] = sym.factor(\n sym_state_probs_1132[p21] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,1)\n sym_state_recursive_ratios_P0_1132[0, 2] = sym.factor(\n sym_state_probs_1132[p02] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1132[1, 2] = sym.factor(\n sym_state_probs_1132[p12] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1132[2, 2] = sym.factor(\n sym_state_probs_1132[p22] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,2)\n sym_state_recursive_ratios_P0_1132[0, 3] = sym.factor(\n sym_state_probs_1132[p03] / sym_state_probs_1132[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1132[1, 3] = sym.factor(\n sym_state_probs_1132[p13] / sym_state_probs_1132[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1132[2, 3] = sym.factor(\n sym_state_probs_1132[p23] / sym_state_probs_1132[p00]\n ) # (0,0) -> (2,3)\n\n return (\n sym_state_probs_1132,\n sym_state_recursive_ratios_1132,\n sym_state_recursive_ratios_right_1132,\n sym_state_recursive_ratios_P0_1132,\n )\n\n\ndef get_symbolic_state_probabilities_1141():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 1\n\n Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14\"\n )\n pi_1141 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14])\n 
dimension_1141 = Q_sym_1141.shape[0]\n\n M_sym_1141 = sym.Matrix(\n [Q_sym_1141.transpose()[:-1, :], sym.ones(1, dimension_1141)]\n )\n sym_diff_equations_1141 = M_sym_1141 @ pi_1141\n\n b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])\n\n eq0_1141 = sym.Eq(sym_diff_equations_1141[0], b_sym_1141[0])\n eq1_1141 = sym.Eq(sym_diff_equations_1141[1], b_sym_1141[1])\n eq2_1141 = sym.Eq(sym_diff_equations_1141[2], b_sym_1141[2])\n eq3_1141 = sym.Eq(sym_diff_equations_1141[3], b_sym_1141[3])\n eq4_1141 = sym.Eq(sym_diff_equations_1141[4], b_sym_1141[4])\n eq5_1141 = sym.Eq(sym_diff_equations_1141[5], b_sym_1141[5])\n eq6_1141 = sym.Eq(sym_diff_equations_1141[6], b_sym_1141[6])\n eq7_1141 = sym.Eq(sym_diff_equations_1141[7], b_sym_1141[7])\n eq8_1141 = sym.Eq(sym_diff_equations_1141[8], b_sym_1141[8])\n\n sym_state_probs_1141 = sym.solve(\n [\n eq0_1141,\n eq1_1141,\n eq2_1141,\n eq3_1141,\n eq4_1141,\n eq5_1141,\n eq6_1141,\n eq7_1141,\n eq8_1141,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14),\n )\n\n sym_state_recursive_ratios_1141 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1141[0, 0] = 1\n sym_state_recursive_ratios_1141[0, 1] = sym.factor(\n sym_state_probs_1141[p01] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1141[1, 1] = sym.factor(\n sym_state_probs_1141[p11] / sym_state_probs_1141[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1141[0, 2] = sym.factor(\n sym_state_probs_1141[p02] / sym_state_probs_1141[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1141[0, 3] = sym.factor(\n sym_state_probs_1141[p03] / sym_state_probs_1141[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1141[0, 4] = 
sym.factor(\n sym_state_probs_1141[p04] / sym_state_probs_1141[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p04]\n ) # (0,4) -> (1,4)\n\n sym_state_recursive_ratios_right_1141 = sym_state_recursive_ratios_1141.copy()\n sym_state_recursive_ratios_right_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p13]\n ) # (1,3) -> (1,4)\n\n sym_state_recursive_ratios_P0_1141 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1141[0, 0] = 1\n sym_state_recursive_ratios_P0_1141[0, 1] = sym.factor(\n sym_state_probs_1141[p01] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1141[1, 1] = sym.factor(\n sym_state_probs_1141[p11] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1141[0, 2] = sym.factor(\n sym_state_probs_1141[p02] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1141[1, 2] = sym.factor(\n sym_state_probs_1141[p12] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1141[0, 3] = sym.factor(\n sym_state_probs_1141[p03] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1141[1, 3] = sym.factor(\n sym_state_probs_1141[p13] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1141[0, 4] = sym.factor(\n sym_state_probs_1141[p04] / sym_state_probs_1141[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1141[1, 4] = sym.factor(\n sym_state_probs_1141[p14] / sym_state_probs_1141[p00]\n ) # (0,0) -> (1,4)\n\n return (\n sym_state_probs_1141,\n 
sym_state_recursive_ratios_1141,\n sym_state_recursive_ratios_right_1141,\n sym_state_recursive_ratios_P0_1141,\n )\n\n\ndef get_symbolic_state_probabilities_1142():\n num_of_servers = 1\n threshold = 1\n system_capacity = 4\n buffer_capacity = 2\n\n Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers=num_of_servers,\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n\n p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24 = sym.symbols(\n \"p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24\"\n )\n pi_1142 = sym.Matrix(\n [p00, p01, p11, p21, p02, p12, p22, p03, p13, p23, p04, p14, p24]\n )\n dimension_1142 = Q_sym_1142.shape[0]\n\n M_sym_1142 = sym.Matrix(\n [Q_sym_1142.transpose()[:-1, :], sym.ones(1, dimension_1142)]\n )\n sym_diff_equations_1142 = M_sym_1142 @ pi_1142\n\n b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])\n\n eq0_1142 = sym.Eq(sym_diff_equations_1142[0], b_sym_1142[0])\n eq1_1142 = sym.Eq(sym_diff_equations_1142[1], b_sym_1142[1])\n eq2_1142 = sym.Eq(sym_diff_equations_1142[2], b_sym_1142[2])\n eq3_1142 = sym.Eq(sym_diff_equations_1142[3], b_sym_1142[3])\n eq4_1142 = sym.Eq(sym_diff_equations_1142[4], b_sym_1142[4])\n eq5_1142 = sym.Eq(sym_diff_equations_1142[5], b_sym_1142[5])\n eq6_1142 = sym.Eq(sym_diff_equations_1142[6], b_sym_1142[6])\n eq7_1142 = sym.Eq(sym_diff_equations_1142[7], b_sym_1142[7])\n eq8_1142 = sym.Eq(sym_diff_equations_1142[8], b_sym_1142[8])\n eq9_1142 = sym.Eq(sym_diff_equations_1142[9], b_sym_1142[9])\n eq10_1142 = sym.Eq(sym_diff_equations_1142[10], b_sym_1142[10])\n eq11_1142 = sym.Eq(sym_diff_equations_1142[11], b_sym_1142[11])\n eq12_1142 = sym.Eq(sym_diff_equations_1142[12], b_sym_1142[12])\n\n sym_state_probs_1142 = sym.solve(\n [\n eq0_1142,\n eq1_1142,\n eq2_1142,\n eq3_1142,\n eq4_1142,\n eq5_1142,\n eq6_1142,\n eq7_1142,\n eq8_1142,\n eq9_1142,\n eq10_1142,\n eq11_1142,\n eq12_1142,\n ],\n (p00, p01, p11, 
p21, p02, p12, p22, p03, p13, p23, p04, p14, p24),\n )\n\n sym_state_recursive_ratios_1142 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1142[0, 0] = 1\n sym_state_recursive_ratios_1142[0, 1] = sym.factor(\n sym_state_probs_1142[p01] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1142[1, 1] = sym.factor(\n sym_state_probs_1142[p11] / sym_state_probs_1142[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1142[2, 1] = sym.factor(\n sym_state_probs_1142[p21] / sym_state_probs_1142[p11]\n ) # (1,1) -> (2,1)\n sym_state_recursive_ratios_1142[0, 2] = sym.factor(\n sym_state_probs_1142[p02] / sym_state_probs_1142[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p12]\n ) # (1,2) -> (2,2)\n sym_state_recursive_ratios_1142[0, 3] = sym.factor(\n sym_state_probs_1142[p03] / sym_state_probs_1142[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p13]\n ) # (1,3) -> (2,3)\n sym_state_recursive_ratios_1142[0, 4] = sym.factor(\n sym_state_probs_1142[p04] / sym_state_probs_1142[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p14]\n ) # (1,4) -> (2,4)\n\n sym_state_recursive_ratios_right_1142 = sym_state_recursive_ratios_1142.copy()\n sym_state_recursive_ratios_right_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p11]\n ) # (1,1) -> (1,2)\n 
sym_state_recursive_ratios_right_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p21]\n ) # (2,1) -> (2,2)\n sym_state_recursive_ratios_right_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / sym_state_probs_1142[p22]\n ) # (2,2) -> (2,3)\n sym_state_recursive_ratios_right_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p23]\n ) # (2,3) -> (2,4)\n\n sym_state_recursive_ratios_P0_1142 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1142[0, 0] = 1\n sym_state_recursive_ratios_P0_1142[0, 1] = sym.factor(\n sym_state_probs_1142[p01] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1142[1, 1] = sym.factor(\n sym_state_probs_1142[p11] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1142[2, 1] = sym.factor(\n sym_state_probs_1142[p21] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,1)\n\n sym_state_recursive_ratios_P0_1142[0, 2] = sym.factor(\n sym_state_probs_1142[p02] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1142[1, 2] = sym.factor(\n sym_state_probs_1142[p12] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1142[2, 2] = sym.factor(\n sym_state_probs_1142[p22] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,2)\n\n sym_state_recursive_ratios_P0_1142[0, 3] = sym.factor(\n sym_state_probs_1142[p03] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1142[1, 3] = sym.factor(\n sym_state_probs_1142[p13] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1142[2, 3] = sym.factor(\n sym_state_probs_1142[p23] / 
sym_state_probs_1142[p00]\n ) # (0,0) -> (2,3)\n\n sym_state_recursive_ratios_P0_1142[0, 4] = sym.factor(\n sym_state_probs_1142[p04] / sym_state_probs_1142[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1142[1, 4] = sym.factor(\n sym_state_probs_1142[p14] / sym_state_probs_1142[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1142[2, 4] = sym.factor(\n sym_state_probs_1142[p24] / sym_state_probs_1142[p00]\n ) # (0,0) -> (2,4)\n\n return (\n sym_state_probs_1142,\n sym_state_recursive_ratios_1142,\n sym_state_recursive_ratios_right_1142,\n sym_state_recursive_ratios_P0_1142,\n )\n\n\ndef get_symbolic_state_probabilities_1151():\n num_of_servers = 1\n threshold = 1\n system_capacity = 5\n buffer_capacity = 1\n\n Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15\"\n )\n pi_1151 = sym.Matrix([p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15])\n dimension_1151 = Q_sym_1151.shape[0]\n\n M_sym_1151 = sym.Matrix(\n [Q_sym_1151.transpose()[:-1, :], sym.ones(1, dimension_1151)]\n )\n sym_diff_equations_1151 = M_sym_1151 @ pi_1151\n\n b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])\n\n eq0_1151 = sym.Eq(sym_diff_equations_1151[0], b_sym_1151[0])\n eq1_1151 = sym.Eq(sym_diff_equations_1151[1], b_sym_1151[1])\n eq2_1151 = sym.Eq(sym_diff_equations_1151[2], b_sym_1151[2])\n eq3_1151 = sym.Eq(sym_diff_equations_1151[3], b_sym_1151[3])\n eq4_1151 = sym.Eq(sym_diff_equations_1151[4], b_sym_1151[4])\n eq5_1151 = sym.Eq(sym_diff_equations_1151[5], b_sym_1151[5])\n eq6_1151 = sym.Eq(sym_diff_equations_1151[6], b_sym_1151[6])\n eq7_1151 = sym.Eq(sym_diff_equations_1151[7], b_sym_1151[7])\n eq8_1151 = sym.Eq(sym_diff_equations_1151[8], b_sym_1151[8])\n eq9_1151 = sym.Eq(sym_diff_equations_1151[9], b_sym_1151[9])\n eq10_1151 = 
sym.Eq(sym_diff_equations_1151[10], b_sym_1151[10])\n\n sym_state_probs_1151 = sym.solve(\n [\n eq0_1151,\n eq1_1151,\n eq2_1151,\n eq3_1151,\n eq4_1151,\n eq5_1151,\n eq6_1151,\n eq7_1151,\n eq8_1151,\n eq9_1151,\n eq10_1151,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15),\n )\n\n sym_state_recursive_ratios_1151 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1151[0, 0] = 1\n sym_state_recursive_ratios_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p05]\n ) # (0,5) -> (1,5)\n\n sym_state_recursive_ratios_right_1151 = sym_state_recursive_ratios_1151.copy()\n sym_state_recursive_ratios_right_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p11]\n ) # (1,1) -> (1,2)\n 
sym_state_recursive_ratios_right_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p14]\n ) # (1,4) -> (1,5)\n\n sym_state_recursive_ratios_P0_1151 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1151[0, 0] = 1\n sym_state_recursive_ratios_P0_1151[0, 1] = sym.factor(\n sym_state_probs_1151[p01] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1151[1, 1] = sym.factor(\n sym_state_probs_1151[p11] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1151[0, 2] = sym.factor(\n sym_state_probs_1151[p02] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1151[1, 2] = sym.factor(\n sym_state_probs_1151[p12] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1151[0, 3] = sym.factor(\n sym_state_probs_1151[p03] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1151[1, 3] = sym.factor(\n sym_state_probs_1151[p13] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1151[0, 4] = sym.factor(\n sym_state_probs_1151[p04] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1151[1, 4] = sym.factor(\n sym_state_probs_1151[p14] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1151[0, 5] = sym.factor(\n sym_state_probs_1151[p05] / sym_state_probs_1151[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1151[1, 5] = sym.factor(\n sym_state_probs_1151[p15] / sym_state_probs_1151[p00]\n ) # (0,0) -> (1,5)\n\n return (\n sym_state_probs_1151,\n sym_state_recursive_ratios_1151,\n 
sym_state_recursive_ratios_right_1151,\n sym_state_recursive_ratios_P0_1151,\n )\n\n\ndef get_symbolic_state_probabilities_1161():\n num_of_servers = 1\n threshold = 1\n system_capacity = 6\n buffer_capacity = 1\n\n Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16 = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16\"\n )\n pi_1161 = sym.Matrix(\n [p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16]\n )\n dimension_1161 = Q_sym_1161.shape[0]\n\n M_sym_1161 = sym.Matrix(\n [Q_sym_1161.transpose()[:-1, :], sym.ones(1, dimension_1161)]\n )\n sym_diff_equations_1161 = M_sym_1161 @ pi_1161\n\n b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])\n\n eq0_1161 = sym.Eq(sym_diff_equations_1161[0], b_sym_1161[0])\n eq1_1161 = sym.Eq(sym_diff_equations_1161[1], b_sym_1161[1])\n eq2_1161 = sym.Eq(sym_diff_equations_1161[2], b_sym_1161[2])\n eq3_1161 = sym.Eq(sym_diff_equations_1161[3], b_sym_1161[3])\n eq4_1161 = sym.Eq(sym_diff_equations_1161[4], b_sym_1161[4])\n eq5_1161 = sym.Eq(sym_diff_equations_1161[5], b_sym_1161[5])\n eq6_1161 = sym.Eq(sym_diff_equations_1161[6], b_sym_1161[6])\n eq7_1161 = sym.Eq(sym_diff_equations_1161[7], b_sym_1161[7])\n eq8_1161 = sym.Eq(sym_diff_equations_1161[8], b_sym_1161[8])\n eq9_1161 = sym.Eq(sym_diff_equations_1161[9], b_sym_1161[9])\n eq10_1161 = sym.Eq(sym_diff_equations_1161[10], b_sym_1161[10])\n eq11_1161 = sym.Eq(sym_diff_equations_1161[11], b_sym_1161[11])\n eq12_1161 = sym.Eq(sym_diff_equations_1161[12], b_sym_1161[12])\n\n sym_state_probs_1161 = sym.solve(\n [\n eq0_1161,\n eq1_1161,\n eq2_1161,\n eq3_1161,\n eq4_1161,\n eq5_1161,\n eq6_1161,\n eq7_1161,\n eq8_1161,\n eq9_1161,\n eq10_1161,\n eq11_1161,\n eq12_1161,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16),\n )\n\n sym_state_recursive_ratios_1161 = 
sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1161[0, 0] = 1\n sym_state_recursive_ratios_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1161[0, 6] = sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p06]\n ) # (0,6) -> (1,6)\n\n sym_state_recursive_ratios_right_1161 = sym_state_recursive_ratios_1161.copy()\n sym_state_recursive_ratios_right_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / 
sym_state_probs_1161[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p15]\n ) # (1,5) -> (1,6)\n\n sym_state_recursive_ratios_P0_1161 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1161[0, 0] = 1\n sym_state_recursive_ratios_P0_1161[0, 1] = sym.factor(\n sym_state_probs_1161[p01] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1161[1, 1] = sym.factor(\n sym_state_probs_1161[p11] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1161[0, 2] = sym.factor(\n sym_state_probs_1161[p02] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1161[1, 2] = sym.factor(\n sym_state_probs_1161[p12] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1161[0, 3] = sym.factor(\n sym_state_probs_1161[p03] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1161[1, 3] = sym.factor(\n sym_state_probs_1161[p13] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1161[0, 4] = sym.factor(\n sym_state_probs_1161[p04] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1161[1, 4] = sym.factor(\n sym_state_probs_1161[p14] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1161[0, 5] = sym.factor(\n sym_state_probs_1161[p05] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1161[1, 5] = sym.factor(\n sym_state_probs_1161[p15] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1161[0, 6] = 
sym.factor(\n sym_state_probs_1161[p06] / sym_state_probs_1161[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1161[1, 6] = sym.factor(\n sym_state_probs_1161[p16] / sym_state_probs_1161[p00]\n ) # (0,0) -> (1,6)\n\n return (\n sym_state_probs_1161,\n sym_state_recursive_ratios_1161,\n sym_state_recursive_ratios_right_1161,\n sym_state_recursive_ratios_P0_1161,\n )\n\n\ndef get_symbolic_state_probabilities_1171():\n num_of_servers = 1\n threshold = 1\n system_capacity = 7\n buffer_capacity = 1\n\n Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17\"\n )\n pi_1171 = sym.Matrix(\n [p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17]\n )\n dimension_1171 = Q_sym_1171.shape[0]\n\n M_sym_1171 = sym.Matrix(\n [Q_sym_1171.transpose()[:-1, :], sym.ones(1, dimension_1171)]\n )\n sym_diff_equations_1171 = M_sym_1171 @ pi_1171\n\n b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])\n\n eq0_1171 = sym.Eq(sym_diff_equations_1171[0], b_sym_1171[0])\n eq1_1171 = sym.Eq(sym_diff_equations_1171[1], b_sym_1171[1])\n eq2_1171 = sym.Eq(sym_diff_equations_1171[2], b_sym_1171[2])\n eq3_1171 = sym.Eq(sym_diff_equations_1171[3], b_sym_1171[3])\n eq4_1171 = sym.Eq(sym_diff_equations_1171[4], b_sym_1171[4])\n eq5_1171 = sym.Eq(sym_diff_equations_1171[5], b_sym_1171[5])\n eq6_1171 = sym.Eq(sym_diff_equations_1171[6], b_sym_1171[6])\n eq7_1171 = sym.Eq(sym_diff_equations_1171[7], b_sym_1171[7])\n eq8_1171 = sym.Eq(sym_diff_equations_1171[8], b_sym_1171[8])\n eq9_1171 = sym.Eq(sym_diff_equations_1171[9], b_sym_1171[9])\n eq10_1171 = sym.Eq(sym_diff_equations_1171[10], b_sym_1171[10])\n eq11_1171 = sym.Eq(sym_diff_equations_1171[11], b_sym_1171[11])\n eq12_1171 = 
sym.Eq(sym_diff_equations_1171[12], b_sym_1171[12])\n eq13_1171 = sym.Eq(sym_diff_equations_1171[13], b_sym_1171[13])\n eq14_1171 = sym.Eq(sym_diff_equations_1171[14], b_sym_1171[14])\n\n sym_state_probs_1171 = sym.solve(\n [\n eq0_1171,\n eq1_1171,\n eq2_1171,\n eq3_1171,\n eq4_1171,\n eq5_1171,\n eq6_1171,\n eq7_1171,\n eq8_1171,\n eq9_1171,\n eq10_1171,\n eq11_1171,\n eq12_1171,\n eq13_1171,\n eq14_1171,\n ],\n (p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17),\n )\n\n sym_state_recursive_ratios_1171 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1171[0, 0] = 1\n sym_state_recursive_ratios_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1171[0, 6] = 
sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p07]\n ) # (0,7) -> (1,7)\n\n sym_state_recursive_ratios_right_1171 = sym_state_recursive_ratios_1171.copy()\n sym_state_recursive_ratios_right_1171[1, 2] = sym.factor(\n sym_state_probs_1171[p12] / sym_state_probs_1171[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p15]\n ) # (1,5) -> (1,6)\n sym_state_recursive_ratios_right_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p16]\n ) # (1,6) -> (1,7)\n\n sym_state_recursive_ratios_P0_1171 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1171[0, 0] = 1\n sym_state_recursive_ratios_P0_1171[0, 1] = sym.factor(\n sym_state_probs_1171[p01] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1171[1, 1] = sym.factor(\n sym_state_probs_1171[p11] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1171[0, 2] = sym.factor(\n sym_state_probs_1171[p02] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1171[1, 2] = sym.factor(\n 
sym_state_probs_1171[p12] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1171[0, 3] = sym.factor(\n sym_state_probs_1171[p03] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1171[1, 3] = sym.factor(\n sym_state_probs_1171[p13] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1171[0, 4] = sym.factor(\n sym_state_probs_1171[p04] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1171[1, 4] = sym.factor(\n sym_state_probs_1171[p14] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1171[0, 5] = sym.factor(\n sym_state_probs_1171[p05] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1171[1, 5] = sym.factor(\n sym_state_probs_1171[p15] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1171[0, 6] = sym.factor(\n sym_state_probs_1171[p06] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1171[1, 6] = sym.factor(\n sym_state_probs_1171[p16] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,6)\n sym_state_recursive_ratios_P0_1171[0, 7] = sym.factor(\n sym_state_probs_1171[p07] / sym_state_probs_1171[p00]\n ) # (0,0) -> (0,7)\n sym_state_recursive_ratios_P0_1171[1, 7] = sym.factor(\n sym_state_probs_1171[p17] / sym_state_probs_1171[p00]\n ) # (0,0) -> (1,7)\n\n return (\n sym_state_probs_1171,\n sym_state_recursive_ratios_1171,\n sym_state_recursive_ratios_right_1171,\n sym_state_recursive_ratios_P0_1171,\n )\n\n\ndef get_symbolic_state_probabilities_1181():\n num_of_servers = 1\n threshold = 1\n system_capacity = 8\n buffer_capacity = 1\n\n Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, 
p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18\"\n )\n pi_1181 = sym.Matrix(\n [\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ]\n )\n dimension_1181 = Q_sym_1181.shape[0]\n\n M_sym_1181 = sym.Matrix(\n [Q_sym_1181.transpose()[:-1, :], sym.ones(1, dimension_1181)]\n )\n sym_diff_equations_1181 = M_sym_1181 @ pi_1181\n\n b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])\n\n eq0_1181 = sym.Eq(sym_diff_equations_1181[0], b_sym_1181[0])\n eq1_1181 = sym.Eq(sym_diff_equations_1181[1], b_sym_1181[1])\n eq2_1181 = sym.Eq(sym_diff_equations_1181[2], b_sym_1181[2])\n eq3_1181 = sym.Eq(sym_diff_equations_1181[3], b_sym_1181[3])\n eq4_1181 = sym.Eq(sym_diff_equations_1181[4], b_sym_1181[4])\n eq5_1181 = sym.Eq(sym_diff_equations_1181[5], b_sym_1181[5])\n eq6_1181 = sym.Eq(sym_diff_equations_1181[6], b_sym_1181[6])\n eq7_1181 = sym.Eq(sym_diff_equations_1181[7], b_sym_1181[7])\n eq8_1181 = sym.Eq(sym_diff_equations_1181[8], b_sym_1181[8])\n eq9_1181 = sym.Eq(sym_diff_equations_1181[9], b_sym_1181[9])\n eq10_1181 = sym.Eq(sym_diff_equations_1181[10], b_sym_1181[10])\n eq11_1181 = sym.Eq(sym_diff_equations_1181[11], b_sym_1181[11])\n eq12_1181 = sym.Eq(sym_diff_equations_1181[12], b_sym_1181[12])\n eq13_1181 = sym.Eq(sym_diff_equations_1181[13], b_sym_1181[13])\n eq14_1181 = sym.Eq(sym_diff_equations_1181[14], b_sym_1181[14])\n eq15_1181 = sym.Eq(sym_diff_equations_1181[15], b_sym_1181[15])\n eq16_1181 = sym.Eq(sym_diff_equations_1181[16], b_sym_1181[16])\n\n sym_state_probs_1181 = sym.solve(\n [\n eq0_1181,\n eq1_1181,\n eq2_1181,\n eq3_1181,\n eq4_1181,\n eq5_1181,\n eq6_1181,\n eq7_1181,\n eq8_1181,\n eq9_1181,\n eq10_1181,\n eq11_1181,\n eq12_1181,\n eq13_1181,\n eq14_1181,\n eq15_1181,\n eq16_1181,\n ],\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n ),\n )\n\n sym_state_recursive_ratios_1181 = 
sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1181[0, 0] = 1\n sym_state_recursive_ratios_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1181[0, 3] = sym.factor(\n sym_state_probs_1181[p03] / sym_state_probs_1181[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p07]\n ) # (0,7) -> (1,7)\n sym_state_recursive_ratios_1181[0, 8] = sym.factor(\n 
sym_state_probs_1181[p08] / sym_state_probs_1181[p07]\n ) # (0,7) -> (0,8)\n sym_state_recursive_ratios_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p08]\n ) # (0,8) -> (1,8)\n\n sym_state_recursive_ratios_right_1181 = sym_state_recursive_ratios_1181.copy()\n sym_state_recursive_ratios_right_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p15]\n ) # (1,5) -> (1,6)\n sym_state_recursive_ratios_right_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p16]\n ) # (1,6) -> (1,7)\n sym_state_recursive_ratios_right_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p17]\n ) # (1,7) -> (1,8)\n\n sym_state_recursive_ratios_P0_1181 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1181[0, 0] = 1\n sym_state_recursive_ratios_P0_1181[0, 1] = sym.factor(\n sym_state_probs_1181[p01] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1181[1, 1] = sym.factor(\n sym_state_probs_1181[p11] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1181[0, 2] = sym.factor(\n sym_state_probs_1181[p02] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1181[1, 2] = sym.factor(\n sym_state_probs_1181[p12] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1181[0, 3] = sym.factor(\n 
sym_state_probs_1181[p03] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1181[1, 3] = sym.factor(\n sym_state_probs_1181[p13] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1181[0, 4] = sym.factor(\n sym_state_probs_1181[p04] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1181[1, 4] = sym.factor(\n sym_state_probs_1181[p14] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1181[0, 5] = sym.factor(\n sym_state_probs_1181[p05] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1181[1, 5] = sym.factor(\n sym_state_probs_1181[p15] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1181[0, 6] = sym.factor(\n sym_state_probs_1181[p06] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1181[1, 6] = sym.factor(\n sym_state_probs_1181[p16] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,6)\n sym_state_recursive_ratios_P0_1181[0, 7] = sym.factor(\n sym_state_probs_1181[p07] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,7)\n sym_state_recursive_ratios_P0_1181[1, 7] = sym.factor(\n sym_state_probs_1181[p17] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,7)\n sym_state_recursive_ratios_P0_1181[0, 8] = sym.factor(\n sym_state_probs_1181[p08] / sym_state_probs_1181[p00]\n ) # (0,0) -> (0,8)\n sym_state_recursive_ratios_P0_1181[1, 8] = sym.factor(\n sym_state_probs_1181[p18] / sym_state_probs_1181[p00]\n ) # (0,0) -> (1,8)\n\n return (\n sym_state_probs_1181,\n sym_state_recursive_ratios_1181,\n sym_state_recursive_ratios_right_1181,\n sym_state_recursive_ratios_P0_1181,\n )\n\n\ndef get_symbolic_state_probabilities_1191():\n num_of_servers = 1\n threshold = 1\n system_capacity = 9\n buffer_capacity = 1\n\n Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(\n num_of_servers, threshold, system_capacity, buffer_capacity\n )\n\n (\n p00,\n p01,\n p11,\n p02,\n 
p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ) = sym.symbols(\n \"p00, p01, p11, p02, p12, p03, p13, p04, p14, p05, p15, p06, p16, p07, p17, p08, p18, p09, p19\"\n )\n pi_1191 = sym.Matrix(\n [\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ]\n )\n dimension_1191 = Q_sym_1191.shape[0]\n\n M_sym_1191 = sym.Matrix(\n [Q_sym_1191.transpose()[:-1, :], sym.ones(1, dimension_1191)]\n )\n sym_diff_equations_1191 = M_sym_1191 @ pi_1191\n\n b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])\n\n eq0_1191 = sym.Eq(sym_diff_equations_1191[0], b_sym_1191[0])\n eq1_1191 = sym.Eq(sym_diff_equations_1191[1], b_sym_1191[1])\n eq2_1191 = sym.Eq(sym_diff_equations_1191[2], b_sym_1191[2])\n eq3_1191 = sym.Eq(sym_diff_equations_1191[3], b_sym_1191[3])\n eq4_1191 = sym.Eq(sym_diff_equations_1191[4], b_sym_1191[4])\n eq5_1191 = sym.Eq(sym_diff_equations_1191[5], b_sym_1191[5])\n eq6_1191 = sym.Eq(sym_diff_equations_1191[6], b_sym_1191[6])\n eq7_1191 = sym.Eq(sym_diff_equations_1191[7], b_sym_1191[7])\n eq8_1191 = sym.Eq(sym_diff_equations_1191[8], b_sym_1191[8])\n eq9_1191 = sym.Eq(sym_diff_equations_1191[9], b_sym_1191[9])\n eq10_1191 = sym.Eq(sym_diff_equations_1191[10], b_sym_1191[10])\n eq11_1191 = sym.Eq(sym_diff_equations_1191[11], b_sym_1191[11])\n eq12_1191 = sym.Eq(sym_diff_equations_1191[12], b_sym_1191[12])\n eq13_1191 = sym.Eq(sym_diff_equations_1191[13], b_sym_1191[13])\n eq14_1191 = sym.Eq(sym_diff_equations_1191[14], b_sym_1191[14])\n eq15_1191 = sym.Eq(sym_diff_equations_1191[15], b_sym_1191[15])\n eq16_1191 = sym.Eq(sym_diff_equations_1191[16], b_sym_1191[16])\n eq17_1191 = sym.Eq(sym_diff_equations_1191[17], b_sym_1191[17])\n eq18_1191 = sym.Eq(sym_diff_equations_1191[18], b_sym_1191[18])\n\n sym_state_probs_1191 = sym.solve(\n [\n eq0_1191,\n eq1_1191,\n eq2_1191,\n eq3_1191,\n eq4_1191,\n eq5_1191,\n 
eq6_1191,\n eq7_1191,\n eq8_1191,\n eq9_1191,\n eq10_1191,\n eq11_1191,\n eq12_1191,\n eq13_1191,\n eq14_1191,\n eq15_1191,\n eq16_1191,\n eq17_1191,\n eq18_1191,\n ],\n (\n p00,\n p01,\n p11,\n p02,\n p12,\n p03,\n p13,\n p04,\n p14,\n p05,\n p15,\n p06,\n p16,\n p07,\n p17,\n p08,\n p18,\n p09,\n p19,\n ),\n )\n\n sym_state_recursive_ratios_1191 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_1191[0, 0] = 1\n sym_state_recursive_ratios_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p01]\n ) # (0,1) -> (1,1)\n sym_state_recursive_ratios_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p01]\n ) # (0,1) -> (0,2)\n sym_state_recursive_ratios_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p02]\n ) # (0,2) -> (1,2)\n sym_state_recursive_ratios_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p02]\n ) # (0,2) -> (0,3)\n sym_state_recursive_ratios_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p03]\n ) # (0,3) -> (1,3)\n sym_state_recursive_ratios_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p03]\n ) # (0,3) -> (0,4)\n sym_state_recursive_ratios_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p04]\n ) # (0,4) -> (1,4)\n sym_state_recursive_ratios_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p04]\n ) # (0,4) -> (0,5)\n sym_state_recursive_ratios_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p05]\n ) # (0,5) -> (1,5)\n sym_state_recursive_ratios_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p05]\n ) # (0,5) -> (0,6)\n sym_state_recursive_ratios_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / 
sym_state_probs_1191[p06]\n ) # (0,6) -> (1,6)\n sym_state_recursive_ratios_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p06]\n ) # (0,6) -> (0,7)\n sym_state_recursive_ratios_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p07]\n ) # (0,7) -> (1,7)\n sym_state_recursive_ratios_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p07]\n ) # (0,7) -> (0,8)\n sym_state_recursive_ratios_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p08]\n ) # (0,8) -> (1,8)\n sym_state_recursive_ratios_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p08]\n ) # (0,8) -> (0,9)\n sym_state_recursive_ratios_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p09]\n ) # (0,9) -> (1,9)\n\n sym_state_recursive_ratios_right_1191 = sym_state_recursive_ratios_1191.copy()\n sym_state_recursive_ratios_right_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p11]\n ) # (1,1) -> (1,2)\n sym_state_recursive_ratios_right_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p12]\n ) # (1,2) -> (1,3)\n sym_state_recursive_ratios_right_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p13]\n ) # (1,3) -> (1,4)\n sym_state_recursive_ratios_right_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p14]\n ) # (1,4) -> (1,5)\n sym_state_recursive_ratios_right_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p15]\n ) # (1,5) -> (1,6)\n sym_state_recursive_ratios_right_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / sym_state_probs_1191[p16]\n ) # (1,6) -> (1,7)\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17]\n ) # (1,7) -> (1,8)\n sym_state_recursive_ratios_right_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p17]\n ) # 
(1,8) -> (1,9)\n\n sym_state_recursive_ratios_P0_1191 = sym.zeros(\n buffer_capacity + 1, system_capacity + 1\n )\n sym_state_recursive_ratios_P0_1191[0, 0] = 1\n sym_state_recursive_ratios_P0_1191[0, 1] = sym.factor(\n sym_state_probs_1191[p01] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,1)\n sym_state_recursive_ratios_P0_1191[1, 1] = sym.factor(\n sym_state_probs_1191[p11] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,1)\n sym_state_recursive_ratios_P0_1191[0, 2] = sym.factor(\n sym_state_probs_1191[p02] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,2)\n sym_state_recursive_ratios_P0_1191[1, 2] = sym.factor(\n sym_state_probs_1191[p12] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,2)\n sym_state_recursive_ratios_P0_1191[0, 3] = sym.factor(\n sym_state_probs_1191[p03] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,3)\n sym_state_recursive_ratios_P0_1191[1, 3] = sym.factor(\n sym_state_probs_1191[p13] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,3)\n sym_state_recursive_ratios_P0_1191[0, 4] = sym.factor(\n sym_state_probs_1191[p04] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,4)\n sym_state_recursive_ratios_P0_1191[1, 4] = sym.factor(\n sym_state_probs_1191[p14] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,4)\n sym_state_recursive_ratios_P0_1191[0, 5] = sym.factor(\n sym_state_probs_1191[p05] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,5)\n sym_state_recursive_ratios_P0_1191[1, 5] = sym.factor(\n sym_state_probs_1191[p15] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,5)\n sym_state_recursive_ratios_P0_1191[0, 6] = sym.factor(\n sym_state_probs_1191[p06] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,6)\n sym_state_recursive_ratios_P0_1191[1, 6] = sym.factor(\n sym_state_probs_1191[p16] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,6)\n sym_state_recursive_ratios_P0_1191[0, 7] = sym.factor(\n sym_state_probs_1191[p07] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,7)\n sym_state_recursive_ratios_P0_1191[1, 7] = sym.factor(\n sym_state_probs_1191[p17] / 
sym_state_probs_1191[p00]\n ) # (0,0) -> (1,7)\n sym_state_recursive_ratios_P0_1191[0, 8] = sym.factor(\n sym_state_probs_1191[p08] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,8)\n sym_state_recursive_ratios_P0_1191[1, 8] = sym.factor(\n sym_state_probs_1191[p18] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,8)\n sym_state_recursive_ratios_P0_1191[0, 9] = sym.factor(\n sym_state_probs_1191[p09] / sym_state_probs_1191[p00]\n ) # (0,0) -> (0,9)\n sym_state_recursive_ratios_P0_1191[1, 9] = sym.factor(\n sym_state_probs_1191[p19] / sym_state_probs_1191[p00]\n ) # (0,0) -> (1,9)\n\n return (\n sym_state_probs_1191,\n sym_state_recursive_ratios_1191,\n sym_state_recursive_ratios_right_1191,\n sym_state_recursive_ratios_P0_1191,\n )\n",
"step-ids": [
5,
12,
13,
16,
17
]
}
|
[
5,
12,
13,
16,
17
] |
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import struct
import wave
scale = 0.01
wav = wave.open('output.wav', 'r')
print 'channels %d'%wav.getnchannels()
print 'smpl width %d'%wav.getsampwidth()
print 'frame rate %f'%wav.getframerate()
nframes = wav.getnframes()
print 'frames %d'%nframes
data = wav.readframes(nframes)
data = scale * np.array(struct.unpack('<%dh'%nframes, data)) / float((1 << 14))
plt.plot(data)
plt.show()
|
normal
|
{
"blob_id": "c105f06e302740e9b7be100df905852bb5610a2c",
"index": 49,
"step-1": "import matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport struct\nimport wave\n\nscale = 0.01\nwav = wave.open('output.wav', 'r')\n\nprint 'channels %d'%wav.getnchannels()\nprint 'smpl width %d'%wav.getsampwidth()\nprint 'frame rate %f'%wav.getframerate()\nnframes = wav.getnframes()\nprint 'frames %d'%nframes\n\ndata = wav.readframes(nframes)\n\ndata = scale * np.array(struct.unpack('<%dh'%nframes, data)) / float((1 << 14))\n\nplt.plot(data)\nplt.show()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import argparse
import numpy as np
def run(path, output):
#xml_df = xml_to_csv(path)
#xml_df.to_csv(output, index=None)
# for filename in os.listdir(path):
# base_file, ext = os.path.splitext(filename)
# print(base_file, ext)
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file+".txt")
l = []
for member in root.findall('object'):
#================ CLASS NAMES =======================
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
#class_number x1 y1 width height image_width image_height
l.append([iclass,
int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text),
int(member.find('bndbox').find('xmax').text)-int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymax').text)-int(member.find('bndbox').find('ymin').text),
int(root.find('size')[0].text),
int(root.find('size')[1].text) ])
np.savetxt(txtFileName, np.asarray(l),fmt='%d', delimiter =' ',newline='\n')
print('Successfully converted xml to txt.')
#=============================================================================
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True, help="annotations path")
ap.add_argument("-o", "--output", required=True, help="txt output path")
args = vars(ap.parse_args())
print()
print()
print()
print('==========================================================================')
print(' ATENTION ')
print()
print(' ATENTION ')
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print('==========================================================================')
print()
print()
print()
run(args["path"], args["output"])
|
normal
|
{
"blob_id": "26d14bc74d893f6f14ee7405280f4af41854c544",
"index": 141,
"step-1": "<mask token>\n\n\ndef run(path, output):\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file + '.txt')\n l = []\n for member in root.findall('object'):\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n l.append([iclass, int(member.find('bndbox').find('xmin').text),\n int(member.find('bndbox').find('ymin').text), int(member.\n find('bndbox').find('xmax').text) - int(member.find(\n 'bndbox').find('xmin').text), int(member.find('bndbox').\n find('ymax').text) - int(member.find('bndbox').find('ymin')\n .text), int(root.find('size')[0].text), int(root.find(\n 'size')[1].text)])\n np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',\n newline='\\n')\n print('Successfully converted xml to txt.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(path, output):\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file + '.txt')\n l = []\n for member in root.findall('object'):\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n l.append([iclass, int(member.find('bndbox').find('xmin').text),\n int(member.find('bndbox').find('ymin').text), int(member.\n find('bndbox').find('xmax').text) - int(member.find(\n 'bndbox').find('xmin').text), int(member.find('bndbox').\n find('ymax').text) - int(member.find('bndbox').find('ymin')\n .text), int(root.find('size')[0].text), int(root.find(\n 'size')[1].text)])\n np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',\n newline='\\n')\n print('Successfully converted xml to txt.')\n\n\n<mask token>\nap.add_argument('-p', '--path', required=True, help='annotations path')\nap.add_argument('-o', '--output', required=True, help='txt output path')\n<mask token>\nprint()\nprint()\nprint()\nprint(\n '=========================================================================='\n )\nprint(\n ' ATENTION '\n )\nprint()\nprint(\n ' ATENTION '\n )\nprint()\nprint()\nprint('Hi body - dont forget update CLASS NAMES')\nprint()\nprint(\n '=========================================================================='\n )\nprint()\nprint()\nprint()\nrun(args['path'], args['output'])\n",
"step-3": "<mask token>\n\n\ndef run(path, output):\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file + '.txt')\n l = []\n for member in root.findall('object'):\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n l.append([iclass, int(member.find('bndbox').find('xmin').text),\n int(member.find('bndbox').find('ymin').text), int(member.\n find('bndbox').find('xmax').text) - int(member.find(\n 'bndbox').find('xmin').text), int(member.find('bndbox').\n find('ymax').text) - int(member.find('bndbox').find('ymin')\n .text), int(root.find('size')[0].text), int(root.find(\n 'size')[1].text)])\n np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',\n newline='\\n')\n print('Successfully converted xml to txt.')\n\n\nap = argparse.ArgumentParser()\nap.add_argument('-p', '--path', required=True, help='annotations path')\nap.add_argument('-o', '--output', required=True, help='txt output path')\nargs = vars(ap.parse_args())\nprint()\nprint()\nprint()\nprint(\n '=========================================================================='\n )\nprint(\n ' ATENTION '\n )\nprint()\nprint(\n ' ATENTION '\n )\nprint()\nprint()\nprint('Hi body - dont forget update CLASS NAMES')\nprint()\nprint(\n '=========================================================================='\n )\nprint()\nprint()\nprint()\nrun(args['path'], args['output'])\n",
"step-4": "import os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nimport argparse\nimport numpy as np\n\n\ndef run(path, output):\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file + '.txt')\n l = []\n for member in root.findall('object'):\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n l.append([iclass, int(member.find('bndbox').find('xmin').text),\n int(member.find('bndbox').find('ymin').text), int(member.\n find('bndbox').find('xmax').text) - int(member.find(\n 'bndbox').find('xmin').text), int(member.find('bndbox').\n find('ymax').text) - int(member.find('bndbox').find('ymin')\n .text), int(root.find('size')[0].text), int(root.find(\n 'size')[1].text)])\n np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',\n newline='\\n')\n print('Successfully converted xml to txt.')\n\n\nap = argparse.ArgumentParser()\nap.add_argument('-p', '--path', required=True, help='annotations path')\nap.add_argument('-o', '--output', required=True, help='txt output path')\nargs = vars(ap.parse_args())\nprint()\nprint()\nprint()\nprint(\n '=========================================================================='\n )\nprint(\n ' ATENTION '\n )\nprint()\nprint(\n ' ATENTION '\n )\nprint()\nprint()\nprint('Hi body - dont forget update CLASS NAMES')\nprint()\nprint(\n '=========================================================================='\n )\nprint()\nprint()\nprint()\nrun(args['path'], args['output'])\n",
"step-5": "import os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nimport argparse\nimport numpy as np\n\ndef run(path, output):\n #xml_df = xml_to_csv(path)\n #xml_df.to_csv(output, index=None)\n\n # for filename in os.listdir(path):\n # base_file, ext = os.path.splitext(filename)\n # print(base_file, ext)\n\n for xml_file in glob.glob(path + '/*.xml'):\n\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file+\".txt\")\n\n l = []\n for member in root.findall('object'):\n\n #================ CLASS NAMES =======================\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n \n #class_number x1 y1 width height image_width image_height\n l.append([iclass, \n int(member.find('bndbox').find('xmin').text), \n int(member.find('bndbox').find('ymin').text), \n int(member.find('bndbox').find('xmax').text)-int(member.find('bndbox').find('xmin').text), \n int(member.find('bndbox').find('ymax').text)-int(member.find('bndbox').find('ymin').text), \n int(root.find('size')[0].text), \n int(root.find('size')[1].text) ])\n\n np.savetxt(txtFileName, np.asarray(l),fmt='%d', delimiter =' ',newline='\\n') \n\n print('Successfully converted xml to txt.')\n\n#=============================================================================\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--path\", required=True, help=\"annotations path\")\nap.add_argument(\"-o\", \"--output\", required=True, help=\"txt output path\")\n\nargs = 
vars(ap.parse_args())\n\n\nprint()\nprint()\nprint()\nprint('==========================================================================')\nprint(' ATENTION ')\nprint()\nprint(' ATENTION ')\nprint()\nprint()\nprint('Hi body - dont forget update CLASS NAMES')\nprint()\nprint('==========================================================================')\nprint()\nprint()\nprint()\n\nrun(args[\"path\"], args[\"output\"])\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
__author__ = 'anderson'
from pyramid.security import Everyone, Allow, ALL_PERMISSIONS
class Root(object):
    """Pyramid root resource that carries the application's access-control list."""

    # Access Control List: everyone may 'view', admins hold every
    # permission, and regular users hold the 'comum' permission.
    __acl__ = [
        (Allow, Everyone, 'view'),
        (Allow, 'role_admin', ALL_PERMISSIONS),
        (Allow, 'role_usuario', 'comum'),
    ]

    def __init__(self, request):
        # Pyramid requires the (request) signature; nothing is stored.
        pass
|
normal
|
{
"blob_id": "5ee2a51ea981f0feab688d9c571620a95d89a422",
"index": 6980,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Root(object):\n <mask token>\n\n def __init__(self, request):\n pass\n",
"step-3": "__author__ = 'anderson'\n<mask token>\n\n\nclass Root(object):\n __acl__ = [(Allow, Everyone, 'view'), (Allow, 'role_admin',\n ALL_PERMISSIONS), (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-4": "__author__ = 'anderson'\nfrom pyramid.security import Everyone, Allow, ALL_PERMISSIONS\n\n\nclass Root(object):\n __acl__ = [(Allow, Everyone, 'view'), (Allow, 'role_admin',\n ALL_PERMISSIONS), (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-5": "__author__ = 'anderson'\nfrom pyramid.security import Everyone, Allow, ALL_PERMISSIONS\n\n\nclass Root(object):\n #Access Control List\n __acl__ = [(Allow, Everyone, 'view'),\n (Allow, 'role_admin', ALL_PERMISSIONS),\n (Allow, 'role_usuario', 'comum')]\n\n def __init__(self, request):\n pass\n",
"step-ids": [
0,
2,
4,
5,
6
]
}
|
[
0,
2,
4,
5,
6
] |
import itertools

# Read the stick count and the stick lengths, then report the largest
# perimeter of a non-degenerate triangle that can be formed.
print("n:", end="")
n = int(input())
print("a:", end="")
a = list(map(int, input().split()))

ans = 0
# Only the first n lengths are considered, matching the original index
# loops over range(n) even if more values were supplied.
for x, y, z in itertools.combinations(a[:n], 3):
    lo, mid, hi = sorted((x, y, z))
    # Triangle inequality: the two shorter sides must exceed the longest.
    if lo + mid > hi:
        ans = max(ans, lo + mid + hi)
print(ans)
|
normal
|
{
"blob_id": "130f49028833bf57d7e4f9fbb0764801c3508c3b",
"index": 3055,
"step-1": "<mask token>\n",
"step-2": "print('n:', end='')\n<mask token>\nprint('a:', end='')\n<mask token>\nfor i in range(n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n ai, aj, ak = sorted([a[i], a[j], a[k]])\n if ai + aj > ak and ai + aj + ak > ans:\n ans = ai + aj + ak\nprint(ans)\n",
"step-3": "print('n:', end='')\nn = int(input())\nprint('a:', end='')\na = list(map(int, input().split()))\nans = 0\nfor i in range(n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n ai, aj, ak = sorted([a[i], a[j], a[k]])\n if ai + aj > ak and ai + aj + ak > ans:\n ans = ai + aj + ak\nprint(ans)\n",
"step-4": "print(\"n:\",end=\"\")\nn=int(input())\nprint(\"a:\",end=\"\")\na=list(map(int,input().split()))\n\nans=0\nfor i in range(n):\n for j in range(i+1,n):\n for k in range(j+1,n):\n ai,aj,ak=sorted([a[i],a[j],a[k]])\n if(ai+aj>ak and ai+aj+ak>ans):\n ans=ai+aj+ak\nprint(ans)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
from NeuralEmulator.Configurators.NormalLeakSourceConfigurator import NormalLeakSourceConfigurator
from NeuralEmulator.Configurators.OZNeuronConfigurator import OZNeuronConfigurator
from NeuralEmulator.Configurators.PulseSynapseConfigurator import PulseSynapseConfigurator
from NeuralEmulator.NormalLeakSource import NormalLeakSource
from NeuralEmulator.OZNeuron import OZNeuron
from NeuralEmulator.Preprocessing.NegPreprocessingBlock import NegPreprocessingBlock
from NeuralEmulator.Preprocessing.PosPreprocessingBlock import PosPreprocessingBlock
from NeuralEmulator.Preprocessing.PreprocessingBlock import PreprocessingBlock
from NeuralEmulator.PulseSynapse import PulseSynapse
from NeuralEmulator.Test.SimpleVoltageSource import SimpleVoltageSource
import random
class NeuronsGenerator:
    """Builds a population of OZ neurons whose leak voltages span a range.

    Each neuron gets its own NormalLeakSource.  Leak voltages are either
    evenly spaced from ``upperBound`` down toward ``lowerBound`` (default),
    or drawn as distinct random values in whole-millivolt steps from that
    range when ``randomVals`` is True.
    """

    def __init__(self, neuronsNumber, synapse, lowerBound=100.0 * (10 ** -3), upperBound=800.0 * (10 ** -3), randomVals=False):
        leakConfigurator = NormalLeakSourceConfigurator()
        neuronConfigurator = OZNeuronConfigurator()

        def _build(voltage):
            # One leak source + one neuron per requested voltage (in volts).
            leak = NormalLeakSource(SimpleVoltageSource(voltage), leakConfigurator)
            return leak, OZNeuron(synapse, leak, neuronConfigurator)

        neurons = []
        leaks = []

        if randomVals is False:
            # Evenly spaced voltages, descending from upperBound.
            delta = (upperBound - lowerBound) / neuronsNumber
            voltage = upperBound
            for _ in range(neuronsNumber):
                leak, neuron = _build(voltage)
                leaks.append(leak)
                neurons.append(neuron)
                voltage -= delta
        else:
            # Distinct random voltages in whole millivolts (bounds inclusive).
            # random.sample raises ValueError when neuronsNumber exceeds the
            # number of available millivolt steps, instead of spinning
            # forever like the previous rejection loop would.
            lowMv = int(lowerBound * (10 ** 3))
            highMv = int(upperBound * (10 ** 3))
            for mv in random.sample(range(lowMv, highMv + 1), neuronsNumber):
                leak, neuron = _build(mv * (10 ** -3))
                leaks.append(leak)
                neurons.append(neuron)

        self.neurons = neurons
        self.leaks = leaks

    def getNeurons(self):
        """Return the list of OZNeuron instances, in creation order."""
        return self.neurons

    def getLeaks(self):
        """Return the NormalLeakSource list, parallel to getNeurons()."""
        return self.leaks
if __name__ == "__main__":
    # Point the emulator at its configuration directory before any
    # component reads NERUSIM_CONF (hard-coded developer path — adjust
    # for your machine).
    os.environ["NERUSIM_CONF"] = r"C:\Users\Avi\Desktop\IntelliSpikesLab\Emulator\config"
    # Smoke test: build a minimal preprocessing chain and generate 50
    # neurons with random leak voltages.
    vin = SimpleVoltageSource()
    preProcessBlock = PreprocessingBlock(vin)
    vposPort = PosPreprocessingBlock(preProcessBlock)
    g = NeuronsGenerator(50, vposPort, randomVals=True)
    neurons = g.getNeurons()
    # Marker print so a successful run is visible on the console.
    print("sf")
|
normal
|
{
"blob_id": "177401f25471cf1cbd32dd0770acdc12bf271361",
"index": 8030,
"step-1": "<mask token>\n\n\nclass NeuronsGenerator:\n\n def __init__(self, neuronsNumber, synapse, lowerBound=100.0 * 10 ** -3,\n upperBound=800.0 * 10 ** -3, randomVals=False):\n noramalLeakSourceConfigurator = NormalLeakSourceConfigurator()\n ozNeuronConfigurator = OZNeuronConfigurator()\n neurons = []\n leaks = []\n if randomVals is False:\n start = upperBound\n delta = (upperBound - lowerBound) / neuronsNumber\n for x in range(neuronsNumber):\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(\n start), noramalLeakSourceConfigurator)\n ozNeuron = OZNeuron(synapse, normalLeakSource,\n ozNeuronConfigurator)\n leaks.append(normalLeakSource)\n neurons.append(ozNeuron)\n start -= delta\n else:\n lowerBound = int(lowerBound * 10 ** 3)\n uppderBound = int(upperBound * 10 ** 3)\n vals = set()\n while len(vals) != neuronsNumber:\n vlk = random.randint(lowerBound, uppderBound)\n vals.add(vlk)\n for x in range(neuronsNumber):\n vlk = vals.pop()\n vlk = vlk * 10 ** -3\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(vlk\n ), noramalLeakSourceConfigurator)\n ozNeuron = OZNeuron(synapse, normalLeakSource,\n ozNeuronConfigurator)\n leaks.append(normalLeakSource)\n neurons.append(ozNeuron)\n self.neurons = neurons\n self.leaks = leaks\n\n def getNeurons(self):\n return self.neurons\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NeuronsGenerator:\n\n def __init__(self, neuronsNumber, synapse, lowerBound=100.0 * 10 ** -3,\n upperBound=800.0 * 10 ** -3, randomVals=False):\n noramalLeakSourceConfigurator = NormalLeakSourceConfigurator()\n ozNeuronConfigurator = OZNeuronConfigurator()\n neurons = []\n leaks = []\n if randomVals is False:\n start = upperBound\n delta = (upperBound - lowerBound) / neuronsNumber\n for x in range(neuronsNumber):\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(\n start), noramalLeakSourceConfigurator)\n ozNeuron = OZNeuron(synapse, normalLeakSource,\n ozNeuronConfigurator)\n leaks.append(normalLeakSource)\n neurons.append(ozNeuron)\n start -= delta\n else:\n lowerBound = int(lowerBound * 10 ** 3)\n uppderBound = int(upperBound * 10 ** 3)\n vals = set()\n while len(vals) != neuronsNumber:\n vlk = random.randint(lowerBound, uppderBound)\n vals.add(vlk)\n for x in range(neuronsNumber):\n vlk = vals.pop()\n vlk = vlk * 10 ** -3\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(vlk\n ), noramalLeakSourceConfigurator)\n ozNeuron = OZNeuron(synapse, normalLeakSource,\n ozNeuronConfigurator)\n leaks.append(normalLeakSource)\n neurons.append(ozNeuron)\n self.neurons = neurons\n self.leaks = leaks\n\n def getNeurons(self):\n return self.neurons\n\n def getLeaks(self):\n return self.leaks\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass NeuronsGenerator:\n\n def __init__(self, neuronsNumber, synapse, lowerBound=100.0 * 10 ** -3,\n upperBound=800.0 * 10 ** -3, randomVals=False):\n noramalLeakSourceConfigurator = NormalLeakSourceConfigurator()\n ozNeuronConfigurator = OZNeuronConfigurator()\n neurons = []\n leaks = []\n if randomVals is False:\n start = upperBound\n delta = (upperBound - lowerBound) / neuronsNumber\n for x in range(neuronsNumber):\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(\n start), noramalLeakSourceConfigurator)\n ozNeuron = OZNeuron(synapse, normalLeakSource,\n ozNeuronConfigurator)\n leaks.append(normalLeakSource)\n neurons.append(ozNeuron)\n start -= delta\n else:\n lowerBound = int(lowerBound * 10 ** 3)\n uppderBound = int(upperBound * 10 ** 3)\n vals = set()\n while len(vals) != neuronsNumber:\n vlk = random.randint(lowerBound, uppderBound)\n vals.add(vlk)\n for x in range(neuronsNumber):\n vlk = vals.pop()\n vlk = vlk * 10 ** -3\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(vlk\n ), noramalLeakSourceConfigurator)\n ozNeuron = OZNeuron(synapse, normalLeakSource,\n ozNeuronConfigurator)\n leaks.append(normalLeakSource)\n neurons.append(ozNeuron)\n self.neurons = neurons\n self.leaks = leaks\n\n def getNeurons(self):\n return self.neurons\n\n def getLeaks(self):\n return self.leaks\n\n\nif __name__ == '__main__':\n os.environ['NERUSIM_CONF'\n ] = 'C:\\\\Users\\\\Avi\\\\Desktop\\\\IntelliSpikesLab\\\\Emulator\\\\config'\n vin = SimpleVoltageSource()\n preProcessBlock = PreprocessingBlock(vin)\n vposPort = PosPreprocessingBlock(preProcessBlock)\n g = NeuronsGenerator(50, vposPort, randomVals=True)\n neurons = g.getNeurons()\n print('sf')\n",
"step-4": "import os\nfrom NeuralEmulator.Configurators.NormalLeakSourceConfigurator import NormalLeakSourceConfigurator\nfrom NeuralEmulator.Configurators.OZNeuronConfigurator import OZNeuronConfigurator\nfrom NeuralEmulator.Configurators.PulseSynapseConfigurator import PulseSynapseConfigurator\nfrom NeuralEmulator.NormalLeakSource import NormalLeakSource\nfrom NeuralEmulator.OZNeuron import OZNeuron\nfrom NeuralEmulator.Preprocessing.NegPreprocessingBlock import NegPreprocessingBlock\nfrom NeuralEmulator.Preprocessing.PosPreprocessingBlock import PosPreprocessingBlock\nfrom NeuralEmulator.Preprocessing.PreprocessingBlock import PreprocessingBlock\nfrom NeuralEmulator.PulseSynapse import PulseSynapse\nfrom NeuralEmulator.Test.SimpleVoltageSource import SimpleVoltageSource\nimport random\n\n\nclass NeuronsGenerator:\n\n def __init__(self, neuronsNumber, synapse, lowerBound=100.0 * 10 ** -3,\n upperBound=800.0 * 10 ** -3, randomVals=False):\n noramalLeakSourceConfigurator = NormalLeakSourceConfigurator()\n ozNeuronConfigurator = OZNeuronConfigurator()\n neurons = []\n leaks = []\n if randomVals is False:\n start = upperBound\n delta = (upperBound - lowerBound) / neuronsNumber\n for x in range(neuronsNumber):\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(\n start), noramalLeakSourceConfigurator)\n ozNeuron = OZNeuron(synapse, normalLeakSource,\n ozNeuronConfigurator)\n leaks.append(normalLeakSource)\n neurons.append(ozNeuron)\n start -= delta\n else:\n lowerBound = int(lowerBound * 10 ** 3)\n uppderBound = int(upperBound * 10 ** 3)\n vals = set()\n while len(vals) != neuronsNumber:\n vlk = random.randint(lowerBound, uppderBound)\n vals.add(vlk)\n for x in range(neuronsNumber):\n vlk = vals.pop()\n vlk = vlk * 10 ** -3\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(vlk\n ), noramalLeakSourceConfigurator)\n ozNeuron = OZNeuron(synapse, normalLeakSource,\n ozNeuronConfigurator)\n leaks.append(normalLeakSource)\n neurons.append(ozNeuron)\n 
self.neurons = neurons\n self.leaks = leaks\n\n def getNeurons(self):\n return self.neurons\n\n def getLeaks(self):\n return self.leaks\n\n\nif __name__ == '__main__':\n os.environ['NERUSIM_CONF'\n ] = 'C:\\\\Users\\\\Avi\\\\Desktop\\\\IntelliSpikesLab\\\\Emulator\\\\config'\n vin = SimpleVoltageSource()\n preProcessBlock = PreprocessingBlock(vin)\n vposPort = PosPreprocessingBlock(preProcessBlock)\n g = NeuronsGenerator(50, vposPort, randomVals=True)\n neurons = g.getNeurons()\n print('sf')\n",
"step-5": "import os\r\n\r\nfrom NeuralEmulator.Configurators.NormalLeakSourceConfigurator import NormalLeakSourceConfigurator\r\nfrom NeuralEmulator.Configurators.OZNeuronConfigurator import OZNeuronConfigurator\r\nfrom NeuralEmulator.Configurators.PulseSynapseConfigurator import PulseSynapseConfigurator\r\nfrom NeuralEmulator.NormalLeakSource import NormalLeakSource\r\nfrom NeuralEmulator.OZNeuron import OZNeuron\r\nfrom NeuralEmulator.Preprocessing.NegPreprocessingBlock import NegPreprocessingBlock\r\nfrom NeuralEmulator.Preprocessing.PosPreprocessingBlock import PosPreprocessingBlock\r\nfrom NeuralEmulator.Preprocessing.PreprocessingBlock import PreprocessingBlock\r\nfrom NeuralEmulator.PulseSynapse import PulseSynapse\r\nfrom NeuralEmulator.Test.SimpleVoltageSource import SimpleVoltageSource\r\nimport random\r\n\r\n\r\nclass NeuronsGenerator:\r\n def __init__(self, neuronsNumber, synapse, lowerBound=100.0 * (10 ** -3), upperBound=800.0 * (10 ** -3), randomVals=False):\r\n\r\n noramalLeakSourceConfigurator = NormalLeakSourceConfigurator()\r\n ozNeuronConfigurator = OZNeuronConfigurator()\r\n\r\n neurons = []\r\n leaks = []\r\n\r\n if randomVals is False:\r\n start = upperBound\r\n delta = (upperBound - lowerBound) / neuronsNumber\r\n for x in range(neuronsNumber):\r\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(start), noramalLeakSourceConfigurator)\r\n ozNeuron = OZNeuron(synapse, normalLeakSource, ozNeuronConfigurator)\r\n leaks.append(normalLeakSource)\r\n neurons.append(ozNeuron)\r\n start -= delta\r\n else:\r\n lowerBound = int(lowerBound * (10 ** 3))\r\n uppderBound = int(upperBound * (10 ** 3))\r\n vals = set()\r\n\r\n while len(vals) != neuronsNumber:\r\n vlk = random.randint(lowerBound, uppderBound)\r\n vals.add(vlk)\r\n\r\n for x in range(neuronsNumber):\r\n vlk = vals.pop()\r\n vlk = vlk * (10 ** -3)\r\n normalLeakSource = NormalLeakSource(SimpleVoltageSource(vlk), noramalLeakSourceConfigurator)\r\n ozNeuron = OZNeuron(synapse, 
normalLeakSource, ozNeuronConfigurator)\r\n leaks.append(normalLeakSource)\r\n neurons.append(ozNeuron)\r\n\r\n self.neurons = neurons\r\n self.leaks = leaks\r\n\r\n def getNeurons(self):\r\n return self.neurons\r\n\r\n def getLeaks(self):\r\n return self.leaks\r\n\r\n\r\nif __name__ == \"__main__\":\r\n os.environ[\"NERUSIM_CONF\"] = r\"C:\\Users\\Avi\\Desktop\\IntelliSpikesLab\\Emulator\\config\"\r\n\r\n vin = SimpleVoltageSource()\r\n preProcessBlock = PreprocessingBlock(vin)\r\n vposPort = PosPreprocessingBlock(preProcessBlock)\r\n\r\n g = NeuronsGenerator(50, vposPort, randomVals=True)\r\n neurons = g.getNeurons()\r\n print(\"sf\")\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import pygame
class BackGround:
    """A static background image anchored at a fixed screen coordinate."""

    def __init__(self, x, y):
        # Remember where the background is anchored; the image itself is
        # loaded later via set_image().
        self.x, self.y = x, y

    def set_image(self, src):
        """Load the image file at *src* and align its rect to (x, y)."""
        loaded = pygame.image.load(src)
        bounds = loaded.get_rect()
        bounds.y = self.y
        bounds.x = self.x
        self.image = loaded
        self.rect = bounds

    def draw(self, screen):
        """Blit the loaded image onto *screen* at its stored rect."""
        screen.blit(self.image, self.rect)
|
normal
|
{
"blob_id": "071e3cf6b4337e0079bbb2c7694fff2468142070",
"index": 6505,
"step-1": "<mask token>\n\n\nclass BackGround:\n\n def __init__(self, x, y):\n self.y = y\n self.x = x\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BackGround:\n\n def __init__(self, x, y):\n self.y = y\n self.x = x\n <mask token>\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n",
"step-3": "<mask token>\n\n\nclass BackGround:\n\n def __init__(self, x, y):\n self.y = y\n self.x = x\n\n def set_image(self, src):\n self.image = pygame.image.load(src)\n self.rect = self.image.get_rect()\n self.rect.y = self.y\n self.rect.x = self.x\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n",
"step-4": "import pygame\n\n\nclass BackGround:\n\n def __init__(self, x, y):\n self.y = y\n self.x = x\n\n def set_image(self, src):\n self.image = pygame.image.load(src)\n self.rect = self.image.get_rect()\n self.rect.y = self.y\n self.rect.x = self.x\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# @Author: Marcela Campo
# @Date: 2016-05-06 18:56:47
# @Last Modified by: Marcela Campo
# @Last Modified time: 2016-05-06 19:03:21
import os
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from server import app, db
# Load the development settings object (debug flags, DB URI, etc.).
app.config.from_object('config.DevelopmentConfig')
# Wire Alembic migrations to the Flask app and its SQLAlchemy handle.
migrate = Migrate(app, db)
manager = Manager(app)
# Expose migration commands under the 'db' sub-command
# (e.g. `python <this script> db upgrade`).
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
normal
|
{
"blob_id": "d7b91b0476a1f2e00408ce1f1501bf98d4c06e4e",
"index": 9540,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.config.from_object('config.DevelopmentConfig')\n<mask token>\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-3": "<mask token>\napp.config.from_object('config.DevelopmentConfig')\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-4": "import os\nfrom flask.ext.script import Manager\nfrom flask.ext.migrate import Migrate, MigrateCommand\nfrom server import app, db\napp.config.from_object('config.DevelopmentConfig')\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Author: Marcela Campo\n# @Date: 2016-05-06 18:56:47\n# @Last Modified by: Marcela Campo\n# @Last Modified time: 2016-05-06 19:03:21\nimport os\nfrom flask.ext.script import Manager\nfrom flask.ext.migrate import Migrate, MigrateCommand\n\nfrom server import app, db\n\n\napp.config.from_object('config.DevelopmentConfig')\n\nmigrate = Migrate(app, db)\nmanager = Manager(app)\n\nmanager.add_command('db', MigrateCommand)\n\n\nif __name__ == '__main__':\n manager.run()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Print n alternating markers on one line: odd positions get "1",
# even positions get "0", each followed by a space.
n = int(input("Please input the number of 1's and 0's you want to print:"))
for position in range(1, n + 1):
    marker = "1 " if position % 2 == 1 else "0 "
    print(marker, end="")
|
normal
|
{
"blob_id": "bd96b31c5de2f0ad4bbc28c876b86ec238db3184",
"index": 9108,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1):\n if i % 2 == 1:\n print('1 ', end='')\n else:\n print('0 ', end='')\n",
"step-3": "n = int(input(\"Please input the number of 1's and 0's you want to print:\"))\nfor i in range(1, n + 1):\n if i % 2 == 1:\n print('1 ', end='')\n else:\n print('0 ', end='')\n",
"step-4": "n = int(input(\"Please input the number of 1's and 0's you want to print:\"))\n\nfor i in range (1, n+1):\n if i%2 == 1:\n print (\"1 \", end = \"\")\n else:\n print (\"0 \", end = \"\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""added Trail.Geometry without srid
Revision ID: 56afb969b589
Revises: 2cf6c7c1f0d7
Create Date: 2014-12-05 18:13:55.512637
"""
# revision identifiers, used by Alembic.
revision = '56afb969b589'  # id of this migration
down_revision = '2cf6c7c1f0d7'  # parent migration this one builds on
from alembic import op
import sqlalchemy as sa
import flask_admin
import geoalchemy2
def upgrade():
    """Apply: add a nullable 'geom' MULTILINESTRING column (no SRID) to 'trail'."""
    ### commands auto generated by Alembic - please adjust! ###
    #with op.batch_alter_table('POI', schema=None) as batch_op:
    #    batch_op.drop_index('idx_POI_point')
    # batch_alter_table performs a copy-and-rename style ALTER, which
    # Alembic documents as the workaround for SQLite's limited ALTER TABLE.
    with op.batch_alter_table('trail', schema=None) as batch_op:
        batch_op.add_column(sa.Column('geom', geoalchemy2.types.Geometry(geometry_type='MULTILINESTRING'), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert: drop the 'geom' column from 'trail' again."""
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('trail', schema=None) as batch_op:
        batch_op.drop_column('geom')
    #with op.batch_alter_table('POI', schema=None) as batch_op:
    #    batch_op.create_index('idx_POI_point', ['point'], unique=False)
    ### end Alembic commands ###
|
normal
|
{
"blob_id": "d724b4f57cf7683d6b6385bf991ed23a5dd8208f",
"index": 3881,
"step-1": "<mask token>\n\n\ndef upgrade():\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.add_column(sa.Column('geom', geoalchemy2.types.Geometry(\n geometry_type='MULTILINESTRING'), nullable=True))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.add_column(sa.Column('geom', geoalchemy2.types.Geometry(\n geometry_type='MULTILINESTRING'), nullable=True))\n\n\ndef downgrade():\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.drop_column('geom')\n",
"step-3": "<mask token>\nrevision = '56afb969b589'\ndown_revision = '2cf6c7c1f0d7'\n<mask token>\n\n\ndef upgrade():\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.add_column(sa.Column('geom', geoalchemy2.types.Geometry(\n geometry_type='MULTILINESTRING'), nullable=True))\n\n\ndef downgrade():\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.drop_column('geom')\n",
"step-4": "<mask token>\nrevision = '56afb969b589'\ndown_revision = '2cf6c7c1f0d7'\nfrom alembic import op\nimport sqlalchemy as sa\nimport flask_admin\nimport geoalchemy2\n\n\ndef upgrade():\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.add_column(sa.Column('geom', geoalchemy2.types.Geometry(\n geometry_type='MULTILINESTRING'), nullable=True))\n\n\ndef downgrade():\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.drop_column('geom')\n",
"step-5": "\"\"\"added Trail.Geometry without srid\n\nRevision ID: 56afb969b589\nRevises: 2cf6c7c1f0d7\nCreate Date: 2014-12-05 18:13:55.512637\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '56afb969b589'\ndown_revision = '2cf6c7c1f0d7'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport flask_admin\nimport geoalchemy2\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n #with op.batch_alter_table('POI', schema=None) as batch_op:\n # batch_op.drop_index('idx_POI_point')\n\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.add_column(sa.Column('geom', geoalchemy2.types.Geometry(geometry_type='MULTILINESTRING'), nullable=True))\n\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('trail', schema=None) as batch_op:\n batch_op.drop_column('geom')\n\n #with op.batch_alter_table('POI', schema=None) as batch_op:\n # batch_op.create_index('idx_POI_point', ['point'], unique=False)\n\n ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Legacy Django hook (deprecated since Django 3.2): names the AppConfig
# subclass Django should use for this app when INSTALLED_APPS lists only
# the package path.
default_app_config = 'teacher.apps.A1Config'
|
normal
|
{
"blob_id": "c466c7e05608b1fbba5eea5bec16d301cee3688f",
"index": 9817,
"step-1": "<mask token>\n",
"step-2": "default_app_config = 'teacher.apps.A1Config'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
## @file
# Contains several utilitities shared by migration tools.
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import EdkLogger
from optparse import OptionParser
from Common.BuildToolError import *
from XmlRoutines import *
from CommonDataClass.CommonClass import *
from Common.LongFilePathSupport import OpenLongFilePath as open
## Set all fields of CommonClass object.
#
# Set all attributes of CommonClass object from XML Dom object of XmlCommon.
#
# @param Common The destine CommonClass object.
# @param XmlCommon The source XML Dom object.
#
def SetCommon(Common, XmlCommon):
    """Fill the shared CommonClass fields of *Common* from XML node *XmlCommon*.

    Reads the Usage, FeatureFlag and SupArchList attributes plus the
    nested HelpText element.
    """
    Common.Usage = XmlAttribute(XmlCommon, "Usage").split()
    Common.FeatureFlag = XmlAttribute(XmlCommon, "FeatureFlag")
    Common.SupArchList = XmlAttribute(XmlCommon, "SupArchList").split()
    # HelpText lives in a child element named after the node itself.
    HelpTextTag = XmlNodeName(XmlCommon) + "/" + "HelpText"
    Common.HelpText = XmlElement(XmlCommon, HelpTextTag)
## Set some fields of CommonHeaderClass object.
#
# Set Name, Guid, FileName and FullPath fields of CommonHeaderClass object from
# XML Dom object of XmlCommonHeader, NameTag and FileName.
#
# @param CommonHeader The destine CommonClass object.
# @param XmlCommonHeader The source XML Dom object.
# @param NameTag The name tag in XML Dom object.
# @param FileName The file name of the XML file.
#
def SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):
    """Fill the identification fields of *CommonHeader* (Name, Guid,
    Version, FileName, FullPath) from XML node *XmlCommonHeader* and the
    name of the XML file it came from."""
    Parent = XmlNodeName(XmlCommonHeader)
    CommonHeader.Name = XmlElement(XmlCommonHeader, Parent + "/" + NameTag)
    CommonHeader.Guid = XmlElement(XmlCommonHeader, Parent + "/" + "GuidValue")
    CommonHeader.Version = XmlElement(XmlCommonHeader, Parent + "/" + "Version")
    # Record both the bare file name and its absolute location.
    CommonHeader.FileName = os.path.basename(FileName)
    CommonHeader.FullPath = os.path.abspath(FileName)
## Regular expression to match specification and value.
#  A match is a \w+ name, whitespace, then an optional \w* value.
mReSpecification = re.compile(r"(?P<Specification>\w+)\s+(?P<Value>\w*)")

## Add specification to specification dictionary.
#
# Scan SpecificationString for whitespace-separated "name value" pairs and
# record each pair in SpecificationDict; a repeated name keeps the last
# value seen.
#
# @param SpecificationDict   The destine Specification dictionary.
# @param SpecificationString The source Specification String from which the
#                            specification name and value pair is abstracted.
#
def AddToSpecificationDict(SpecificationDict, SpecificationString):
    """Abstract specification name, value pair from Specification String"""
    SpecificationDict.update(
        (Match.group("Specification"), Match.group("Value"))
        for Match in mReSpecification.finditer(SpecificationString)
    )
## Set all fields of CommonHeaderClass object.
#
# Set all attributes of CommonHeaderClass object from XML Dom object of
# XmlCommonHeader, NameTag and FileName.
#
# @param CommonHeader The destine CommonClass object.
# @param XmlCommonHeader The source XML Dom object.
# @param NameTag The name tag in XML Dom object.
# @param FileName The file name of the XML file.
#
def SetCommonHeader(CommonHeader, XmlCommonHeader):
    """Set all attributes of CommonHeaderClass object from XmlCommonHeader"""
    Parent = XmlNodeName(XmlCommonHeader)
    # Plain text child elements copied one-to-one onto the header object.
    for Field in ("Abstract", "Description", "Copyright", "License"):
        setattr(CommonHeader, Field, XmlElement(XmlCommonHeader, Parent + "/" + Field))
    # Specification strings are "name value" pairs folded into a dict.
    Specification = XmlElement(XmlCommonHeader, Parent + "/" + "Specification")
    AddToSpecificationDict(CommonHeader.Specification, Specification)
    CommonHeader.ModuleType = XmlElement(XmlCommonHeader, Parent + "/" + "ModuleType")
## Load a new Cloned Record class object.
#
# Read an input XML ClonedRecord DOM object and return an object of Cloned Record
# contained in the DOM object.
#
# @param XmlCloned A child XML DOM object in a Common XML DOM.
#
# @retvel ClonedRecord A new Cloned Record object created by XmlCloned.
#
def LoadClonedRecord(XmlCloned):
    """Return a ClonedRecordClass built from an XML Cloned-record DOM node."""
    ClonedRecord = ClonedRecordClass()
    # Attributes on the node itself.
    ClonedRecord.Id = int(XmlAttribute(XmlCloned, "Id"))
    ClonedRecord.FarGuid = XmlAttribute(XmlCloned, "FarGuid")
    # Child elements identifying the cloned package and module.
    for Field, Tag in (
        ("PackageGuid", "Cloned/PackageGuid"),
        ("PackageVersion", "Cloned/PackageVersion"),
        ("ModuleGuid", "Cloned/ModuleGuid"),
        ("ModuleVersion", "Cloned/ModuleVersion"),
    ):
        setattr(ClonedRecord, Field, XmlElement(XmlCloned, Tag))
    return ClonedRecord
## Load a new Guid/Protocol/Ppi common class object.
#
# Read an input XML Guid/Protocol/Ppi DOM object and return an object of
# Guid/Protocol/Ppi contained in the DOM object.
#
# @param XmlGuidProtocolPpiCommon A child XML DOM object in a Common XML DOM.
#
# @retvel GuidProtocolPpiCommon A new GuidProtocolPpiCommon class object
# created by XmlGuidProtocolPpiCommon.
#
def LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):
    """Return a GuidProtocolPpiCommonClass built from a Guid/Protocol/Ppi
    XML DOM node; the node's own tag name decides where the C name lives."""
    Common = GuidProtocolPpiCommonClass()
    Common.Name = XmlAttribute(XmlGuidProtocolPpiCommon, "Name")
    Parent = XmlNodeName(XmlGuidProtocolPpiCommon)
    # The C-name element's tag depends on which kind of node this is.
    if Parent == "Entry":
        CNameTag = "%s/C_Name" % Parent
    elif Parent == "GuidCNames":
        CNameTag = "%s/GuidCName" % Parent
    else:
        CNameTag = "%s/%sCName" % (Parent, Parent)
    Common.CName = XmlElement(XmlGuidProtocolPpiCommon, CNameTag)
    Common.Guid = XmlElement(XmlGuidProtocolPpiCommon, Parent + "/" + "GuidValue")
    # Nodes whose tag ends in "Notify" represent notification registrations.
    if Parent.endswith("Notify"):
        Common.Notify = True
    Common.GuidTypeList = XmlAttribute(XmlGuidProtocolPpiCommon, "GuidTypeList").split()
    Common.SupModuleList = XmlAttribute(XmlGuidProtocolPpiCommon, "SupModuleList").split()
    SetCommon(Common, XmlGuidProtocolPpiCommon)
    return Common
## Load a new Pcd class object.
#
# Read an input XML Pcd DOM object and return an object of Pcd
# contained in the DOM object.
#
# @param XmlPcd A child XML DOM object in a Common XML DOM.
#
# @retvel Pcd A new Pcd object created by XmlPcd.
#
def LoadPcd(XmlPcd):
    """Return a new PcdClass object equivalent to XmlPcd"""
    Pcd = PcdClass()
    # Scalar child elements of <PcdEntry> copied onto the object.
    for Attr, Tag in (
        ("CName", "PcdEntry/C_Name"),
        ("Token", "PcdEntry/Token"),
        ("TokenSpaceGuidCName", "PcdEntry/TokenSpaceGuidCName"),
        ("DatumType", "PcdEntry/DatumType"),
        ("MaxDatumSize", "PcdEntry/MaxDatumSize"),
        ("DefaultValue", "PcdEntry/DefaultValue"),
    ):
        setattr(Pcd, Attr, XmlElement(XmlPcd, Tag))
    # Item type is an attribute; the two lists are whitespace-separated.
    Pcd.ItemType = XmlAttribute(XmlPcd, "PcdItemType")
    Pcd.ValidUsage = XmlElement(XmlPcd, "PcdEntry/ValidUsage").split()
    Pcd.SupModuleList = XmlAttribute(XmlPcd, "SupModuleList").split()
    SetCommon(Pcd, XmlPcd)
    return Pcd
## Load a new LibraryClass class object.
#
# Read an input XML LibraryClass DOM object and return an object of LibraryClass
# contained in the DOM object.
#
# @param XmlLibraryClass A child XML DOM object in a Common XML DOM.
#
# @retvel LibraryClass A new LibraryClass object created by XmlLibraryClass.
#
def LoadLibraryClass(XmlLibraryClass):
    """Return a LibraryClassClass built from a LibraryClass XML DOM node."""
    LibraryClass = LibraryClassClass()
    # Prefer the Keyword child element; fall back to the Name attribute
    # when the element is absent or empty.
    Keyword = XmlElement(XmlLibraryClass, "LibraryClass/Keyword")
    if Keyword == "":
        Keyword = XmlAttribute(XmlLibraryClass, "Name")
    LibraryClass.LibraryClass = Keyword
    LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, "LibraryClass/IncludeHeader")
    LibraryClass.RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, "RecommendedInstanceVersion")
    LibraryClass.RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, "RecommendedInstanceGuid")
    LibraryClass.SupModuleList = XmlAttribute(XmlLibraryClass, "SupModuleList").split()
    SetCommon(LibraryClass, XmlLibraryClass)
    return LibraryClass
## Load a new Build Option class object.
#
# Read an input XML BuildOption DOM object and return an object of Build Option
# contained in the DOM object.
#
# @param XmlBuildOption A child XML DOM object in a Common XML DOM.
#
# @retvel BuildOption A new Build Option object created by XmlBuildOption.
#
def LoadBuildOption(XmlBuildOption):
    """Return a new BuildOptionClass object equivalent to XmlBuildOption."""
    BuildOption = BuildOptionClass()
    # The option text itself is the element's character data.
    BuildOption.Option = XmlElementData(XmlBuildOption)
    # Space-separated attributes are split into lists; scalar ones kept as-is.
    BuildOption.BuildTargetList = XmlAttribute(
        XmlBuildOption, "BuildTargets").split()
    BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, "ToolChainFamily")
    BuildOption.TagName = XmlAttribute(XmlBuildOption, "TagName")
    BuildOption.ToolCode = XmlAttribute(XmlBuildOption, "ToolCode")
    BuildOption.SupArchList = XmlAttribute(
        XmlBuildOption, "SupArchList").split()
    return BuildOption
## Load a new User Extensions class object.
#
# Read an input XML UserExtensions DOM object and return an object of User
# Extensions contained in the DOM object.
#
# @param XmlUserExtensions A child XML DOM object in a Common XML DOM.
#
# @retval UserExtensions A new User Extensions object created by
# XmlUserExtensions.
#
def LoadUserExtensions(XmlUserExtensions):
    """Return a new UserExtensionsClass object read from XmlUserExtensions."""
    UserExtensions = UserExtensionsClass()
    # UserID and Identifier are XML attributes; the content is the raw
    # character data of the element itself.
    UserExtensions.UserID = XmlAttribute(XmlUserExtensions, "UserID")
    UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, "Identifier")
    UserExtensions.Content = XmlElementData(XmlUserExtensions)
    return UserExtensions
## Store content to a text file object.
#
# Write some text file content to a text file object. The contents may echo
# in screen in a verbose way.
#
# @param TextFile The text file object.
# @param Content The string object to be written to a text file.
#
def StoreTextFile(TextFile, Content):
    """Write Content to the open file object TextFile.

    The content is first echoed through the verbose logger so that a
    verbose run shows everything being written.

    @param TextFile  The text file object (anything with a write method).
    @param Content   The string to be written.
    """
    # Log before writing so verbose output appears even if the write fails.
    EdkLogger.verbose(Content)
    TextFile.write(Content)
## Add item to a section.
#
# Add an Item with specific CPU architecture to section dictionary.
# The possible duplication is ensured to be removed.
#
# @param Section Section dictionary indexed by CPU architecture.
# @param Arch CPU architecture: Ia32, X64, Ipf, ARM, AARCH64, Ebc or Common.
# @param Item The Item to be added to section dictionary.
#
def AddToSection(Section, Arch, Item):
    """Add Item to the per-architecture list Section[Arch].

    The list is created on first use for a given architecture, and
    duplicate items are silently dropped so each entry appears only once.

    @param Section  Section dictionary indexed by CPU architecture.
    @param Arch     CPU architecture key, e.g. "Ia32", "X64" or "common".
    @param Item     The item string to be added to the section dictionary.
    """
    # setdefault fetches the existing list or installs a fresh one in a
    # single step, replacing the original get-then-reassign sequence.
    SectionArch = Section.setdefault(Arch, [])
    if Item not in SectionArch:
        SectionArch.append(Item)
## Get section contents.
#
# Return the content of section named SectionName.
# the contents is based on Methods and ObjectLists.
#
# @param SectionName The name of the section.
# @param Method A function returning a string item of an object.
# @param ObjectList The list of object.
#
# @retval Section The string content of a section.
#
def GetSection(SectionName, Method, ObjectList):
    """Return the text of the section named SectionName.

    Method extracts one string item from each object in ObjectList.  The
    items are grouped per supported architecture -- or under "common" when
    an object declares no architectures -- and rendered as one
    "[SectionName.Arch]" block per non-empty group.

    @param SectionName  The name of the section.
    @param Method       A function returning a string item of an object.
    @param ObjectList   The list of objects.

    @retval Section     The string content of the section.
    """
    SupportedArches = ["common", "Ia32", "X64", "Ipf", "Ebc", "ARM", "AARCH64"]
    SectionDict = {}
    for Object in ObjectList:
        Item = Method(Object)
        if Item == "":
            continue
        Indented = " %s" % Item
        Arches = Object.SupArchList
        if len(Arches) == 0:
            # No architecture restriction: file it under "common".
            AddToSection(SectionDict, "common", Indented)
        else:
            for Arch in SupportedArches:
                if Arch.upper() in Arches:
                    AddToSection(SectionDict, Arch, Indented)
    Section = ""
    for Arch in SupportedArches:
        Body = "\n".join(SectionDict.get(Arch, []))
        if Body != "":
            Section += "[%s.%s]\n%s\n\n" % (SectionName, Arch, Body)
    if Section != "":
        Section += "\n"
    return Section
## Store file header to a text file.
#
# Write standard file header to a text file. The content includes copyright,
# abstract, description and license extracted from CommonHeader class object.
#
# @param TextFile The text file object.
# @param CommonHeader The source CommonHeader class object.
#
def StoreHeader(TextFile, CommonHeader):
    """Write the standard file header to a text file.

    The header comment block includes the abstract, description, copyright
    and license taken from the CommonHeader class object.

    @param TextFile      The text file object.
    @param CommonHeader  The source CommonHeader class object.
    """
    Parts = ["#/** @file\n#\n"]
    Parts.append("# " + CommonHeader.Abstract + "\n#\n")
    # Re-comment every line of the multi-line description.
    Parts.append("# " + CommonHeader.Description.strip().replace("\n", "\n# ") + "\n")
    Parts.append("# " + CommonHeader.Copyright + "\n#\n")
    Parts.append("# " + CommonHeader.License.replace("\n", "\n# ").replace(" ", " "))
    Parts.append("\n#\n#**/\n\n")
    StoreTextFile(TextFile, "".join(Parts))
## Store file header to a text file.
#
# Write Defines section to a text file. DefinesTupleList determines the content.
#
# @param TextFile The text file object.
# @param DefinesTupleList The list of (Tag, Value) to be added as one item.
#
def StoreDefinesSection(TextFile, DefinesTupleList):
    """Write a [Defines] section to a text file.

    @param TextFile          The text file object.
    @param DefinesTupleList  The list of (Tag, Value) tuples, one per item.
    """
    Lines = ["[Defines]\n"]
    Lines.extend(" %-30s = %s\n" % DefineItem for DefineItem in DefinesTupleList)
    Lines.append("\n\n")
    StoreTextFile(TextFile, "".join(Lines))
## Return one User Extension section.
#
# Read the input UserExtentsions class object and return one section.
#
# @param UserExtensions An input UserExtensions class object.
#
# @retval UserExtensionSection A section representing UserExtensions object.
#
def GetUserExtensions(UserExtensions):
    """Return one "[UserExtensions.UserID.Identifier]" section string.

    @param UserExtensions  An input UserExtensions class object.

    @retval A section string representing the UserExtensions object.
    """
    return "[UserExtensions.%s.%s]\n %s\n\n" % (
        UserExtensions.UserID,
        UserExtensions.Identifier,
        UserExtensions.Content,
    )
## Regular expression matching a "Tag = Value" equation line:
#  group(1) is the tag (non-whitespace) and group(2) the value, which may
#  be empty.  Used by GetTextFileInfo to scan text-format metadata files.
mReEquation = re.compile(r"\s*(\S+)\s*=\s*(\S*)\s*")
## Return a value tuple matching information in a text file.
#
# Parse the text file and return a value tuple corresponding to an input tag
# tuple. In case of any error, a tuple of empty strings is returned.
#
# @param FileName The file name of the text file.
# @param TagTuple A tuple of tags as the key to the value.
#
# @retval ValueTuple  The returned tuple corresponding to the tag tuple.
#
def GetTextFileInfo(FileName, TagTuple):
    """Return a list of values matching TagTuple parsed from a text file.

    Each line of the file (with trailing "#" comments stripped) is matched
    against mReEquation ("Tag = Value"); when the upper-cased tag equals an
    entry of TagTuple, the corresponding slot of the result is filled in.
    On an I/O error a list of empty strings is returned.

    @param FileName  The file name of the text file.
    @param TagTuple  A tuple of tags as the key to the value.

    @retval ValueTuple  The values found; "" for tags never seen.
    """
    ValueTuple = [""] * len(TagTuple)
    try:
        # "with" guarantees the file handle is closed; the original bare
        # open() in the for-loop leaked it on every call.
        with open(FileName) as TextFile:
            for Line in TextFile:
                # Drop trailing comments before matching.
                Line = Line.split("#", 1)[0]
                MatchEquation = mReEquation.match(Line)
                if not MatchEquation:
                    continue
                Tag = MatchEquation.group(1).upper()
                Value = MatchEquation.group(2)
                for Index in range(len(TagTuple)):
                    if TagTuple[Index] == Tag:
                        ValueTuple[Index] = Value
    except EnvironmentError:
        # Narrowed from a bare "except:" which also swallowed
        # KeyboardInterrupt, SystemExit and programming errors.
        EdkLogger.info("IO Error in reading file %s" % FileName)
    return ValueTuple
## Return a value tuple matching information in an XML file.
#
# Parse the XML file and return a value tuple corresponding to an input tag
# tuple. In case of any error, a tuple of empty strings is returned.
#
# @param FileName The file name of the XML file.
# @param TagTuple A tuple of tags as the key to the value.
#
# @retval ValueTuple  The returned tuple corresponding to the tag tuple.
#
def GetXmlFileInfo(FileName, TagTuple):
    """Return a tuple of element values read from an XML file.

    @param FileName  The file name of the XML file.
    @param TagTuple  A tuple of XML tags used as lookup keys.

    @retval The tuple of values corresponding to TagTuple.
    """
    XmlDom = XmlParseFile(FileName)
    Values = []
    for XmlTag in TagTuple:
        Values.append(XmlElement(XmlDom, XmlTag))
    return tuple(Values)
## Parse migration command line options
#
# Use standard Python module optparse to parse command line option of this tool.
#
# @param Source The source file type.
# @param Destinate The destinate file type.
#
# @retval Options A optparse object containing the parsed options.
# @retval InputFile Path of an source file to be migrated.
#
def MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):
    """Parse the command line options of a migration tool.

    @param Source         The source file type.
    @param Destinate      The destinate file type.
    @param ToolName       The tool name shown in the usage/version strings.
    @param VersionNumber  The version reported by --version.

    @retval Options    An optparse object containing the parsed options.
    @retval InputFile  Path of the source file to be migrated.
    """
    # Use a clearer usage string to override the default usage message.
    UsageString = "%s [-a] [-v|-q] [-o <output_file>] <input_file>" % ToolName
    Parser = OptionParser(
        description="Copyright (c) 2007, Intel Corporation. All rights reserved.",
        version="%s Version %.2f" % (ToolName, VersionNumber),
        usage=UsageString,
    )
    Parser.add_option("-o", "--output", dest="OutputFile",
                      help="The name of the %s file to be created." % Destinate)
    Parser.add_option("-a", "--auto", dest="AutoWrite",
                      action="store_true", default=False,
                      help="Automatically create the %s file using the name of the %s file and replacing file extension" % (Source, Destinate))
    Parser.add_option("-q", "--quiet", action="store_true", type=None,
                      help="Disable all messages except FATAL ERRORS.")
    Parser.add_option("-v", "--verbose", action="store_true", type=None,
                      help="Turn on verbose output with informational messages printed.")
    Options, Args = Parser.parse_args()

    # Logging level: verbose takes precedence over quiet; default is INFO.
    if Options.verbose:
        EdkLogger.setLevel(EdkLogger.VERBOSE)
    elif Options.quiet:
        EdkLogger.setLevel(EdkLogger.QUIET)
    else:
        EdkLogger.setLevel(EdkLogger.INFO)

    # Exactly one positional input file is required, and it must exist.
    if len(Args) == 0:
        raise MigrationError(PARAMETER_MISSING, name="Input file",
                             usage=Parser.get_usage())
    if len(Args) > 1:
        raise MigrationError(PARAMETER_INVALID, name="Too many input files",
                             usage=Parser.get_usage())
    InputFile = Args[0]
    if not os.path.exists(InputFile):
        raise MigrationError(FILE_NOT_FOUND, name=InputFile)

    # -o and -a are mutually exclusive; one of them must supply the output.
    if Options.OutputFile:
        if Options.AutoWrite:
            raise MigrationError(OPTION_CONFLICT, arg1="-o", arg2="-a",
                                 usage=Parser.get_usage())
    elif Options.AutoWrite:
        Options.OutputFile = os.path.splitext(InputFile)[0] + "." + Destinate.lower()
    else:
        raise MigrationError(OPTION_MISSING, name="-o", usage=Parser.get_usage())
    return Options, InputFile
# This acts like the main() function for the script, unless it is 'import'ed
# into another script.  This module only provides helper functions, so there
# is intentionally no standalone behavior.
if __name__ == '__main__':
    pass
|
normal
|
{
"blob_id": "2dbb1051b35898288db629fd0c5b3887c429e9b8",
"index": 1313,
"step-1": "<mask token>\n\n\ndef SetCommon(Common, XmlCommon):\n XmlTag = 'Usage'\n Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()\n XmlTag = 'FeatureFlag'\n Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)\n XmlTag = 'SupArchList'\n Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()\n XmlTag = XmlNodeName(XmlCommon) + '/' + 'HelpText'\n Common.HelpText = XmlElement(XmlCommon, XmlTag)\n\n\ndef SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):\n XmlParentTag = XmlNodeName(XmlCommonHeader)\n XmlTag = XmlParentTag + '/' + NameTag\n CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParentTag + '/' + 'GuidValue'\n CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParentTag + '/' + 'Version'\n CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)\n CommonHeader.FileName = os.path.basename(FileName)\n CommonHeader.FullPath = os.path.abspath(FileName)\n\n\n<mask token>\n\n\ndef AddToSpecificationDict(SpecificationDict, SpecificationString):\n \"\"\"Abstract specification name, value pair from Specification String\"\"\"\n for SpecificationMatch in mReSpecification.finditer(SpecificationString):\n Specification = SpecificationMatch.group('Specification')\n Value = SpecificationMatch.group('Value')\n SpecificationDict[Specification] = Value\n\n\n<mask token>\n\n\ndef LoadClonedRecord(XmlCloned):\n ClonedRecord = ClonedRecordClass()\n XmlTag = 'Id'\n ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))\n XmlTag = 'FarGuid'\n ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)\n XmlTag = 'Cloned/PackageGuid'\n ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/PackageVersion'\n ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/ModuleGuid'\n ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/ModuleVersion'\n ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)\n return ClonedRecord\n\n\ndef 
LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):\n GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()\n XmlTag = 'Name'\n GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)\n if XmlParent == 'Entry':\n XmlTag = '%s/C_Name' % XmlParent\n elif XmlParent == 'GuidCNames':\n XmlTag = '%s/GuidCName' % XmlParent\n else:\n XmlTag = '%s/%sCName' % (XmlParent, XmlParent)\n GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n XmlTag = XmlParent + '/' + 'GuidValue'\n GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n if XmlParent.endswith('Notify'):\n GuidProtocolPpiCommon.Notify = True\n XmlTag = 'GuidTypeList'\n GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()\n XmlTag = 'SupModuleList'\n SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.SupModuleList = SupModules.split()\n SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)\n return GuidProtocolPpiCommon\n\n\ndef LoadPcd(XmlPcd):\n \"\"\"Return a new PcdClass object equivalent to XmlPcd\"\"\"\n Pcd = PcdClass()\n XmlTag = 'PcdEntry/C_Name'\n Pcd.CName = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/Token'\n Pcd.Token = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/TokenSpaceGuidCName'\n Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/DatumType'\n Pcd.DatumType = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/MaxDatumSize'\n Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/DefaultValue'\n Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdItemType'\n Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/ValidUsage'\n Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()\n XmlTag = 'SupModuleList'\n Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()\n SetCommon(Pcd, XmlPcd)\n return Pcd\n\n\n<mask token>\n\n\ndef 
StoreTextFile(TextFile, Content):\n EdkLogger.verbose(Content)\n TextFile.write(Content)\n\n\ndef AddToSection(Section, Arch, Item):\n SectionArch = Section.get(Arch, [])\n if Item not in SectionArch:\n SectionArch.append(Item)\n Section[Arch] = SectionArch\n\n\n<mask token>\n\n\ndef GetUserExtensions(UserExtensions):\n UserId = UserExtensions.UserID\n Identifier = UserExtensions.Identifier\n Content = UserExtensions.Content\n return '[UserExtensions.%s.%s]\\n %s\\n\\n' % (UserId, Identifier, Content)\n\n\n<mask token>\n\n\ndef GetXmlFileInfo(FileName, TagTuple):\n XmlDom = XmlParseFile(FileName)\n return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])\n\n\ndef MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):\n UsageString = '%s [-a] [-v|-q] [-o <output_file>] <input_file>' % ToolName\n Version = '%s Version %.2f' % (ToolName, VersionNumber)\n Copyright = 'Copyright (c) 2007, Intel Corporation. All rights reserved.'\n Parser = OptionParser(description=Copyright, version=Version, usage=\n UsageString)\n Parser.add_option('-o', '--output', dest='OutputFile', help=\n 'The name of the %s file to be created.' 
% Destinate)\n Parser.add_option('-a', '--auto', dest='AutoWrite', action='store_true',\n default=False, help=\n 'Automatically create the %s file using the name of the %s file and replacing file extension'\n % (Source, Destinate))\n Parser.add_option('-q', '--quiet', action='store_true', type=None, help\n ='Disable all messages except FATAL ERRORS.')\n Parser.add_option('-v', '--verbose', action='store_true', type=None,\n help='Turn on verbose output with informational messages printed.')\n Options, Args = Parser.parse_args()\n if Options.verbose:\n EdkLogger.setLevel(EdkLogger.VERBOSE)\n elif Options.quiet:\n EdkLogger.setLevel(EdkLogger.QUIET)\n else:\n EdkLogger.setLevel(EdkLogger.INFO)\n if len(Args) == 0:\n raise MigrationError(PARAMETER_MISSING, name='Input file', usage=\n Parser.get_usage())\n if len(Args) > 1:\n raise MigrationError(PARAMETER_INVALID, name='Too many input files',\n usage=Parser.get_usage())\n InputFile = Args[0]\n if not os.path.exists(InputFile):\n raise MigrationError(FILE_NOT_FOUND, name=InputFile)\n if Options.OutputFile:\n if Options.AutoWrite:\n raise MigrationError(OPTION_CONFLICT, arg1='-o', arg2='-a',\n usage=Parser.get_usage())\n elif Options.AutoWrite:\n Options.OutputFile = os.path.splitext(InputFile)[0\n ] + '.' + Destinate.lower()\n else:\n raise MigrationError(OPTION_MISSING, name='-o', usage=Parser.\n get_usage())\n return Options, InputFile\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef SetCommon(Common, XmlCommon):\n XmlTag = 'Usage'\n Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()\n XmlTag = 'FeatureFlag'\n Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)\n XmlTag = 'SupArchList'\n Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()\n XmlTag = XmlNodeName(XmlCommon) + '/' + 'HelpText'\n Common.HelpText = XmlElement(XmlCommon, XmlTag)\n\n\ndef SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):\n XmlParentTag = XmlNodeName(XmlCommonHeader)\n XmlTag = XmlParentTag + '/' + NameTag\n CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParentTag + '/' + 'GuidValue'\n CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParentTag + '/' + 'Version'\n CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)\n CommonHeader.FileName = os.path.basename(FileName)\n CommonHeader.FullPath = os.path.abspath(FileName)\n\n\n<mask token>\n\n\ndef AddToSpecificationDict(SpecificationDict, SpecificationString):\n \"\"\"Abstract specification name, value pair from Specification String\"\"\"\n for SpecificationMatch in mReSpecification.finditer(SpecificationString):\n Specification = SpecificationMatch.group('Specification')\n Value = SpecificationMatch.group('Value')\n SpecificationDict[Specification] = Value\n\n\ndef SetCommonHeader(CommonHeader, XmlCommonHeader):\n \"\"\"Set all attributes of CommonHeaderClass object from XmlCommonHeader\"\"\"\n XmlParent = XmlNodeName(XmlCommonHeader)\n XmlTag = XmlParent + '/' + 'Abstract'\n CommonHeader.Abstract = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'Description'\n CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'Copyright'\n CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'License'\n CommonHeader.License = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'Specification'\n Specification = 
XmlElement(XmlCommonHeader, XmlTag)\n AddToSpecificationDict(CommonHeader.Specification, Specification)\n XmlTag = XmlParent + '/' + 'ModuleType'\n CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)\n\n\ndef LoadClonedRecord(XmlCloned):\n ClonedRecord = ClonedRecordClass()\n XmlTag = 'Id'\n ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))\n XmlTag = 'FarGuid'\n ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)\n XmlTag = 'Cloned/PackageGuid'\n ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/PackageVersion'\n ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/ModuleGuid'\n ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/ModuleVersion'\n ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)\n return ClonedRecord\n\n\ndef LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):\n GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()\n XmlTag = 'Name'\n GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)\n if XmlParent == 'Entry':\n XmlTag = '%s/C_Name' % XmlParent\n elif XmlParent == 'GuidCNames':\n XmlTag = '%s/GuidCName' % XmlParent\n else:\n XmlTag = '%s/%sCName' % (XmlParent, XmlParent)\n GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n XmlTag = XmlParent + '/' + 'GuidValue'\n GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n if XmlParent.endswith('Notify'):\n GuidProtocolPpiCommon.Notify = True\n XmlTag = 'GuidTypeList'\n GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()\n XmlTag = 'SupModuleList'\n SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.SupModuleList = SupModules.split()\n SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)\n return GuidProtocolPpiCommon\n\n\ndef LoadPcd(XmlPcd):\n \"\"\"Return a new PcdClass object 
equivalent to XmlPcd\"\"\"\n Pcd = PcdClass()\n XmlTag = 'PcdEntry/C_Name'\n Pcd.CName = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/Token'\n Pcd.Token = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/TokenSpaceGuidCName'\n Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/DatumType'\n Pcd.DatumType = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/MaxDatumSize'\n Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/DefaultValue'\n Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdItemType'\n Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/ValidUsage'\n Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()\n XmlTag = 'SupModuleList'\n Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()\n SetCommon(Pcd, XmlPcd)\n return Pcd\n\n\ndef LoadLibraryClass(XmlLibraryClass):\n LibraryClass = LibraryClassClass()\n XmlTag = 'LibraryClass/Keyword'\n LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)\n if LibraryClass.LibraryClass == '':\n XmlTag = 'Name'\n LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)\n XmlTag = 'LibraryClass/IncludeHeader'\n LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)\n XmlTag = 'RecommendedInstanceVersion'\n RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion\n XmlTag = 'RecommendedInstanceGuid'\n RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid\n XmlTag = 'SupModuleList'\n SupModules = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.SupModuleList = SupModules.split()\n SetCommon(LibraryClass, XmlLibraryClass)\n return LibraryClass\n\n\ndef LoadBuildOption(XmlBuildOption):\n \"\"\"Return a new BuildOptionClass object equivalent to XmlBuildOption\"\"\"\n BuildOption = BuildOptionClass()\n BuildOption.Option = XmlElementData(XmlBuildOption)\n XmlTag = 
'BuildTargets'\n BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()\n XmlTag = 'ToolChainFamily'\n BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'TagName'\n BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'ToolCode'\n BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'SupArchList'\n BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()\n return BuildOption\n\n\ndef LoadUserExtensions(XmlUserExtensions):\n UserExtensions = UserExtensionsClass()\n XmlTag = 'UserID'\n UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)\n XmlTag = 'Identifier'\n UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)\n UserExtensions.Content = XmlElementData(XmlUserExtensions)\n return UserExtensions\n\n\ndef StoreTextFile(TextFile, Content):\n EdkLogger.verbose(Content)\n TextFile.write(Content)\n\n\ndef AddToSection(Section, Arch, Item):\n SectionArch = Section.get(Arch, [])\n if Item not in SectionArch:\n SectionArch.append(Item)\n Section[Arch] = SectionArch\n\n\n<mask token>\n\n\ndef StoreHeader(TextFile, CommonHeader):\n CopyRight = CommonHeader.Copyright\n Abstract = CommonHeader.Abstract\n Description = CommonHeader.Description\n License = CommonHeader.License\n Header = '#/** @file\\n#\\n'\n Header += '# ' + Abstract + '\\n#\\n'\n Header += '# ' + Description.strip().replace('\\n', '\\n# ') + '\\n'\n Header += '# ' + CopyRight + '\\n#\\n'\n Header += '# ' + License.replace('\\n', '\\n# ').replace(' ', ' ')\n Header += '\\n#\\n#**/\\n\\n'\n StoreTextFile(TextFile, Header)\n\n\ndef StoreDefinesSection(TextFile, DefinesTupleList):\n Section = '[Defines]\\n'\n for DefineItem in DefinesTupleList:\n Section += ' %-30s = %s\\n' % DefineItem\n Section += '\\n\\n'\n StoreTextFile(TextFile, Section)\n\n\ndef GetUserExtensions(UserExtensions):\n UserId = UserExtensions.UserID\n Identifier = UserExtensions.Identifier\n Content = 
UserExtensions.Content\n return '[UserExtensions.%s.%s]\\n %s\\n\\n' % (UserId, Identifier, Content)\n\n\n<mask token>\n\n\ndef GetTextFileInfo(FileName, TagTuple):\n ValueTuple = [''] * len(TagTuple)\n try:\n for Line in open(FileName):\n Line = Line.split('#', 1)[0]\n MatchEquation = mReEquation.match(Line)\n if MatchEquation:\n Tag = MatchEquation.group(1).upper()\n Value = MatchEquation.group(2)\n for Index in range(len(TagTuple)):\n if TagTuple[Index] == Tag:\n ValueTuple[Index] = Value\n except:\n EdkLogger.info('IO Error in reading file %s' % FileName)\n return ValueTuple\n\n\ndef GetXmlFileInfo(FileName, TagTuple):\n XmlDom = XmlParseFile(FileName)\n return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])\n\n\ndef MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):\n UsageString = '%s [-a] [-v|-q] [-o <output_file>] <input_file>' % ToolName\n Version = '%s Version %.2f' % (ToolName, VersionNumber)\n Copyright = 'Copyright (c) 2007, Intel Corporation. All rights reserved.'\n Parser = OptionParser(description=Copyright, version=Version, usage=\n UsageString)\n Parser.add_option('-o', '--output', dest='OutputFile', help=\n 'The name of the %s file to be created.' 
% Destinate)\n Parser.add_option('-a', '--auto', dest='AutoWrite', action='store_true',\n default=False, help=\n 'Automatically create the %s file using the name of the %s file and replacing file extension'\n % (Source, Destinate))\n Parser.add_option('-q', '--quiet', action='store_true', type=None, help\n ='Disable all messages except FATAL ERRORS.')\n Parser.add_option('-v', '--verbose', action='store_true', type=None,\n help='Turn on verbose output with informational messages printed.')\n Options, Args = Parser.parse_args()\n if Options.verbose:\n EdkLogger.setLevel(EdkLogger.VERBOSE)\n elif Options.quiet:\n EdkLogger.setLevel(EdkLogger.QUIET)\n else:\n EdkLogger.setLevel(EdkLogger.INFO)\n if len(Args) == 0:\n raise MigrationError(PARAMETER_MISSING, name='Input file', usage=\n Parser.get_usage())\n if len(Args) > 1:\n raise MigrationError(PARAMETER_INVALID, name='Too many input files',\n usage=Parser.get_usage())\n InputFile = Args[0]\n if not os.path.exists(InputFile):\n raise MigrationError(FILE_NOT_FOUND, name=InputFile)\n if Options.OutputFile:\n if Options.AutoWrite:\n raise MigrationError(OPTION_CONFLICT, arg1='-o', arg2='-a',\n usage=Parser.get_usage())\n elif Options.AutoWrite:\n Options.OutputFile = os.path.splitext(InputFile)[0\n ] + '.' + Destinate.lower()\n else:\n raise MigrationError(OPTION_MISSING, name='-o', usage=Parser.\n get_usage())\n return Options, InputFile\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef SetCommon(Common, XmlCommon):\n XmlTag = 'Usage'\n Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()\n XmlTag = 'FeatureFlag'\n Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)\n XmlTag = 'SupArchList'\n Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()\n XmlTag = XmlNodeName(XmlCommon) + '/' + 'HelpText'\n Common.HelpText = XmlElement(XmlCommon, XmlTag)\n\n\ndef SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):\n XmlParentTag = XmlNodeName(XmlCommonHeader)\n XmlTag = XmlParentTag + '/' + NameTag\n CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParentTag + '/' + 'GuidValue'\n CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParentTag + '/' + 'Version'\n CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)\n CommonHeader.FileName = os.path.basename(FileName)\n CommonHeader.FullPath = os.path.abspath(FileName)\n\n\n<mask token>\n\n\ndef AddToSpecificationDict(SpecificationDict, SpecificationString):\n \"\"\"Abstract specification name, value pair from Specification String\"\"\"\n for SpecificationMatch in mReSpecification.finditer(SpecificationString):\n Specification = SpecificationMatch.group('Specification')\n Value = SpecificationMatch.group('Value')\n SpecificationDict[Specification] = Value\n\n\ndef SetCommonHeader(CommonHeader, XmlCommonHeader):\n \"\"\"Set all attributes of CommonHeaderClass object from XmlCommonHeader\"\"\"\n XmlParent = XmlNodeName(XmlCommonHeader)\n XmlTag = XmlParent + '/' + 'Abstract'\n CommonHeader.Abstract = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'Description'\n CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'Copyright'\n CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'License'\n CommonHeader.License = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'Specification'\n Specification = 
XmlElement(XmlCommonHeader, XmlTag)\n AddToSpecificationDict(CommonHeader.Specification, Specification)\n XmlTag = XmlParent + '/' + 'ModuleType'\n CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)\n\n\ndef LoadClonedRecord(XmlCloned):\n ClonedRecord = ClonedRecordClass()\n XmlTag = 'Id'\n ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))\n XmlTag = 'FarGuid'\n ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)\n XmlTag = 'Cloned/PackageGuid'\n ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/PackageVersion'\n ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/ModuleGuid'\n ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/ModuleVersion'\n ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)\n return ClonedRecord\n\n\ndef LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):\n GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()\n XmlTag = 'Name'\n GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)\n if XmlParent == 'Entry':\n XmlTag = '%s/C_Name' % XmlParent\n elif XmlParent == 'GuidCNames':\n XmlTag = '%s/GuidCName' % XmlParent\n else:\n XmlTag = '%s/%sCName' % (XmlParent, XmlParent)\n GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n XmlTag = XmlParent + '/' + 'GuidValue'\n GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n if XmlParent.endswith('Notify'):\n GuidProtocolPpiCommon.Notify = True\n XmlTag = 'GuidTypeList'\n GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()\n XmlTag = 'SupModuleList'\n SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.SupModuleList = SupModules.split()\n SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)\n return GuidProtocolPpiCommon\n\n\ndef LoadPcd(XmlPcd):\n \"\"\"Return a new PcdClass object 
equivalent to XmlPcd\"\"\"\n Pcd = PcdClass()\n XmlTag = 'PcdEntry/C_Name'\n Pcd.CName = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/Token'\n Pcd.Token = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/TokenSpaceGuidCName'\n Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/DatumType'\n Pcd.DatumType = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/MaxDatumSize'\n Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/DefaultValue'\n Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdItemType'\n Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/ValidUsage'\n Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()\n XmlTag = 'SupModuleList'\n Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()\n SetCommon(Pcd, XmlPcd)\n return Pcd\n\n\ndef LoadLibraryClass(XmlLibraryClass):\n LibraryClass = LibraryClassClass()\n XmlTag = 'LibraryClass/Keyword'\n LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)\n if LibraryClass.LibraryClass == '':\n XmlTag = 'Name'\n LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)\n XmlTag = 'LibraryClass/IncludeHeader'\n LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)\n XmlTag = 'RecommendedInstanceVersion'\n RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion\n XmlTag = 'RecommendedInstanceGuid'\n RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid\n XmlTag = 'SupModuleList'\n SupModules = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.SupModuleList = SupModules.split()\n SetCommon(LibraryClass, XmlLibraryClass)\n return LibraryClass\n\n\ndef LoadBuildOption(XmlBuildOption):\n \"\"\"Return a new BuildOptionClass object equivalent to XmlBuildOption\"\"\"\n BuildOption = BuildOptionClass()\n BuildOption.Option = XmlElementData(XmlBuildOption)\n XmlTag = 
'BuildTargets'\n BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()\n XmlTag = 'ToolChainFamily'\n BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'TagName'\n BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'ToolCode'\n BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'SupArchList'\n BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()\n return BuildOption\n\n\ndef LoadUserExtensions(XmlUserExtensions):\n UserExtensions = UserExtensionsClass()\n XmlTag = 'UserID'\n UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)\n XmlTag = 'Identifier'\n UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)\n UserExtensions.Content = XmlElementData(XmlUserExtensions)\n return UserExtensions\n\n\ndef StoreTextFile(TextFile, Content):\n EdkLogger.verbose(Content)\n TextFile.write(Content)\n\n\ndef AddToSection(Section, Arch, Item):\n SectionArch = Section.get(Arch, [])\n if Item not in SectionArch:\n SectionArch.append(Item)\n Section[Arch] = SectionArch\n\n\ndef GetSection(SectionName, Method, ObjectList):\n SupportedArches = ['common', 'Ia32', 'X64', 'Ipf', 'Ebc', 'ARM', 'AARCH64']\n SectionDict = {}\n for Object in ObjectList:\n Item = Method(Object)\n if Item == '':\n continue\n Item = ' %s' % Item\n Arches = Object.SupArchList\n if len(Arches) == 0:\n AddToSection(SectionDict, 'common', Item)\n else:\n for Arch in SupportedArches:\n if Arch.upper() in Arches:\n AddToSection(SectionDict, Arch, Item)\n Section = ''\n for Arch in SupportedArches:\n SectionArch = '\\n'.join(SectionDict.get(Arch, []))\n if SectionArch != '':\n Section += '[%s.%s]\\n%s\\n' % (SectionName, Arch, SectionArch)\n Section += '\\n'\n if Section != '':\n Section += '\\n'\n return Section\n\n\ndef StoreHeader(TextFile, CommonHeader):\n CopyRight = CommonHeader.Copyright\n Abstract = CommonHeader.Abstract\n Description = CommonHeader.Description\n License = 
CommonHeader.License\n Header = '#/** @file\\n#\\n'\n Header += '# ' + Abstract + '\\n#\\n'\n Header += '# ' + Description.strip().replace('\\n', '\\n# ') + '\\n'\n Header += '# ' + CopyRight + '\\n#\\n'\n Header += '# ' + License.replace('\\n', '\\n# ').replace(' ', ' ')\n Header += '\\n#\\n#**/\\n\\n'\n StoreTextFile(TextFile, Header)\n\n\ndef StoreDefinesSection(TextFile, DefinesTupleList):\n Section = '[Defines]\\n'\n for DefineItem in DefinesTupleList:\n Section += ' %-30s = %s\\n' % DefineItem\n Section += '\\n\\n'\n StoreTextFile(TextFile, Section)\n\n\ndef GetUserExtensions(UserExtensions):\n UserId = UserExtensions.UserID\n Identifier = UserExtensions.Identifier\n Content = UserExtensions.Content\n return '[UserExtensions.%s.%s]\\n %s\\n\\n' % (UserId, Identifier, Content)\n\n\n<mask token>\n\n\ndef GetTextFileInfo(FileName, TagTuple):\n ValueTuple = [''] * len(TagTuple)\n try:\n for Line in open(FileName):\n Line = Line.split('#', 1)[0]\n MatchEquation = mReEquation.match(Line)\n if MatchEquation:\n Tag = MatchEquation.group(1).upper()\n Value = MatchEquation.group(2)\n for Index in range(len(TagTuple)):\n if TagTuple[Index] == Tag:\n ValueTuple[Index] = Value\n except:\n EdkLogger.info('IO Error in reading file %s' % FileName)\n return ValueTuple\n\n\ndef GetXmlFileInfo(FileName, TagTuple):\n XmlDom = XmlParseFile(FileName)\n return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])\n\n\ndef MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):\n UsageString = '%s [-a] [-v|-q] [-o <output_file>] <input_file>' % ToolName\n Version = '%s Version %.2f' % (ToolName, VersionNumber)\n Copyright = 'Copyright (c) 2007, Intel Corporation. All rights reserved.'\n Parser = OptionParser(description=Copyright, version=Version, usage=\n UsageString)\n Parser.add_option('-o', '--output', dest='OutputFile', help=\n 'The name of the %s file to be created.' 
% Destinate)\n Parser.add_option('-a', '--auto', dest='AutoWrite', action='store_true',\n default=False, help=\n 'Automatically create the %s file using the name of the %s file and replacing file extension'\n % (Source, Destinate))\n Parser.add_option('-q', '--quiet', action='store_true', type=None, help\n ='Disable all messages except FATAL ERRORS.')\n Parser.add_option('-v', '--verbose', action='store_true', type=None,\n help='Turn on verbose output with informational messages printed.')\n Options, Args = Parser.parse_args()\n if Options.verbose:\n EdkLogger.setLevel(EdkLogger.VERBOSE)\n elif Options.quiet:\n EdkLogger.setLevel(EdkLogger.QUIET)\n else:\n EdkLogger.setLevel(EdkLogger.INFO)\n if len(Args) == 0:\n raise MigrationError(PARAMETER_MISSING, name='Input file', usage=\n Parser.get_usage())\n if len(Args) > 1:\n raise MigrationError(PARAMETER_INVALID, name='Too many input files',\n usage=Parser.get_usage())\n InputFile = Args[0]\n if not os.path.exists(InputFile):\n raise MigrationError(FILE_NOT_FOUND, name=InputFile)\n if Options.OutputFile:\n if Options.AutoWrite:\n raise MigrationError(OPTION_CONFLICT, arg1='-o', arg2='-a',\n usage=Parser.get_usage())\n elif Options.AutoWrite:\n Options.OutputFile = os.path.splitext(InputFile)[0\n ] + '.' + Destinate.lower()\n else:\n raise MigrationError(OPTION_MISSING, name='-o', usage=Parser.\n get_usage())\n return Options, InputFile\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "<mask token>\n\n\ndef SetCommon(Common, XmlCommon):\n XmlTag = 'Usage'\n Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()\n XmlTag = 'FeatureFlag'\n Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)\n XmlTag = 'SupArchList'\n Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()\n XmlTag = XmlNodeName(XmlCommon) + '/' + 'HelpText'\n Common.HelpText = XmlElement(XmlCommon, XmlTag)\n\n\ndef SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):\n XmlParentTag = XmlNodeName(XmlCommonHeader)\n XmlTag = XmlParentTag + '/' + NameTag\n CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParentTag + '/' + 'GuidValue'\n CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParentTag + '/' + 'Version'\n CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)\n CommonHeader.FileName = os.path.basename(FileName)\n CommonHeader.FullPath = os.path.abspath(FileName)\n\n\nmReSpecification = re.compile('(?P<Specification>\\\\w+)\\\\s+(?P<Value>\\\\w*)')\n\n\ndef AddToSpecificationDict(SpecificationDict, SpecificationString):\n \"\"\"Abstract specification name, value pair from Specification String\"\"\"\n for SpecificationMatch in mReSpecification.finditer(SpecificationString):\n Specification = SpecificationMatch.group('Specification')\n Value = SpecificationMatch.group('Value')\n SpecificationDict[Specification] = Value\n\n\ndef SetCommonHeader(CommonHeader, XmlCommonHeader):\n \"\"\"Set all attributes of CommonHeaderClass object from XmlCommonHeader\"\"\"\n XmlParent = XmlNodeName(XmlCommonHeader)\n XmlTag = XmlParent + '/' + 'Abstract'\n CommonHeader.Abstract = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'Description'\n CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'Copyright'\n CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)\n XmlTag = XmlParent + '/' + 'License'\n CommonHeader.License = XmlElement(XmlCommonHeader, 
XmlTag)\n XmlTag = XmlParent + '/' + 'Specification'\n Specification = XmlElement(XmlCommonHeader, XmlTag)\n AddToSpecificationDict(CommonHeader.Specification, Specification)\n XmlTag = XmlParent + '/' + 'ModuleType'\n CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)\n\n\ndef LoadClonedRecord(XmlCloned):\n ClonedRecord = ClonedRecordClass()\n XmlTag = 'Id'\n ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))\n XmlTag = 'FarGuid'\n ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)\n XmlTag = 'Cloned/PackageGuid'\n ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/PackageVersion'\n ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/ModuleGuid'\n ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)\n XmlTag = 'Cloned/ModuleVersion'\n ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)\n return ClonedRecord\n\n\ndef LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):\n GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()\n XmlTag = 'Name'\n GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)\n if XmlParent == 'Entry':\n XmlTag = '%s/C_Name' % XmlParent\n elif XmlParent == 'GuidCNames':\n XmlTag = '%s/GuidCName' % XmlParent\n else:\n XmlTag = '%s/%sCName' % (XmlParent, XmlParent)\n GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n XmlTag = XmlParent + '/' + 'GuidValue'\n GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n if XmlParent.endswith('Notify'):\n GuidProtocolPpiCommon.Notify = True\n XmlTag = 'GuidTypeList'\n GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()\n XmlTag = 'SupModuleList'\n SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.SupModuleList = SupModules.split()\n SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)\n return 
GuidProtocolPpiCommon\n\n\ndef LoadPcd(XmlPcd):\n \"\"\"Return a new PcdClass object equivalent to XmlPcd\"\"\"\n Pcd = PcdClass()\n XmlTag = 'PcdEntry/C_Name'\n Pcd.CName = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/Token'\n Pcd.Token = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/TokenSpaceGuidCName'\n Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/DatumType'\n Pcd.DatumType = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/MaxDatumSize'\n Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/DefaultValue'\n Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)\n XmlTag = 'PcdItemType'\n Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)\n XmlTag = 'PcdEntry/ValidUsage'\n Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()\n XmlTag = 'SupModuleList'\n Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()\n SetCommon(Pcd, XmlPcd)\n return Pcd\n\n\ndef LoadLibraryClass(XmlLibraryClass):\n LibraryClass = LibraryClassClass()\n XmlTag = 'LibraryClass/Keyword'\n LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)\n if LibraryClass.LibraryClass == '':\n XmlTag = 'Name'\n LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)\n XmlTag = 'LibraryClass/IncludeHeader'\n LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)\n XmlTag = 'RecommendedInstanceVersion'\n RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion\n XmlTag = 'RecommendedInstanceGuid'\n RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid\n XmlTag = 'SupModuleList'\n SupModules = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.SupModuleList = SupModules.split()\n SetCommon(LibraryClass, XmlLibraryClass)\n return LibraryClass\n\n\ndef LoadBuildOption(XmlBuildOption):\n \"\"\"Return a new BuildOptionClass object equivalent to XmlBuildOption\"\"\"\n BuildOption = 
BuildOptionClass()\n BuildOption.Option = XmlElementData(XmlBuildOption)\n XmlTag = 'BuildTargets'\n BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()\n XmlTag = 'ToolChainFamily'\n BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'TagName'\n BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'ToolCode'\n BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)\n XmlTag = 'SupArchList'\n BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()\n return BuildOption\n\n\ndef LoadUserExtensions(XmlUserExtensions):\n UserExtensions = UserExtensionsClass()\n XmlTag = 'UserID'\n UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)\n XmlTag = 'Identifier'\n UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)\n UserExtensions.Content = XmlElementData(XmlUserExtensions)\n return UserExtensions\n\n\ndef StoreTextFile(TextFile, Content):\n EdkLogger.verbose(Content)\n TextFile.write(Content)\n\n\ndef AddToSection(Section, Arch, Item):\n SectionArch = Section.get(Arch, [])\n if Item not in SectionArch:\n SectionArch.append(Item)\n Section[Arch] = SectionArch\n\n\ndef GetSection(SectionName, Method, ObjectList):\n SupportedArches = ['common', 'Ia32', 'X64', 'Ipf', 'Ebc', 'ARM', 'AARCH64']\n SectionDict = {}\n for Object in ObjectList:\n Item = Method(Object)\n if Item == '':\n continue\n Item = ' %s' % Item\n Arches = Object.SupArchList\n if len(Arches) == 0:\n AddToSection(SectionDict, 'common', Item)\n else:\n for Arch in SupportedArches:\n if Arch.upper() in Arches:\n AddToSection(SectionDict, Arch, Item)\n Section = ''\n for Arch in SupportedArches:\n SectionArch = '\\n'.join(SectionDict.get(Arch, []))\n if SectionArch != '':\n Section += '[%s.%s]\\n%s\\n' % (SectionName, Arch, SectionArch)\n Section += '\\n'\n if Section != '':\n Section += '\\n'\n return Section\n\n\ndef StoreHeader(TextFile, CommonHeader):\n CopyRight = CommonHeader.Copyright\n Abstract = 
CommonHeader.Abstract\n Description = CommonHeader.Description\n License = CommonHeader.License\n Header = '#/** @file\\n#\\n'\n Header += '# ' + Abstract + '\\n#\\n'\n Header += '# ' + Description.strip().replace('\\n', '\\n# ') + '\\n'\n Header += '# ' + CopyRight + '\\n#\\n'\n Header += '# ' + License.replace('\\n', '\\n# ').replace(' ', ' ')\n Header += '\\n#\\n#**/\\n\\n'\n StoreTextFile(TextFile, Header)\n\n\ndef StoreDefinesSection(TextFile, DefinesTupleList):\n Section = '[Defines]\\n'\n for DefineItem in DefinesTupleList:\n Section += ' %-30s = %s\\n' % DefineItem\n Section += '\\n\\n'\n StoreTextFile(TextFile, Section)\n\n\ndef GetUserExtensions(UserExtensions):\n UserId = UserExtensions.UserID\n Identifier = UserExtensions.Identifier\n Content = UserExtensions.Content\n return '[UserExtensions.%s.%s]\\n %s\\n\\n' % (UserId, Identifier, Content)\n\n\nmReEquation = re.compile('\\\\s*(\\\\S+)\\\\s*=\\\\s*(\\\\S*)\\\\s*')\n\n\ndef GetTextFileInfo(FileName, TagTuple):\n ValueTuple = [''] * len(TagTuple)\n try:\n for Line in open(FileName):\n Line = Line.split('#', 1)[0]\n MatchEquation = mReEquation.match(Line)\n if MatchEquation:\n Tag = MatchEquation.group(1).upper()\n Value = MatchEquation.group(2)\n for Index in range(len(TagTuple)):\n if TagTuple[Index] == Tag:\n ValueTuple[Index] = Value\n except:\n EdkLogger.info('IO Error in reading file %s' % FileName)\n return ValueTuple\n\n\ndef GetXmlFileInfo(FileName, TagTuple):\n XmlDom = XmlParseFile(FileName)\n return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])\n\n\ndef MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):\n UsageString = '%s [-a] [-v|-q] [-o <output_file>] <input_file>' % ToolName\n Version = '%s Version %.2f' % (ToolName, VersionNumber)\n Copyright = 'Copyright (c) 2007, Intel Corporation. 
All rights reserved.'\n Parser = OptionParser(description=Copyright, version=Version, usage=\n UsageString)\n Parser.add_option('-o', '--output', dest='OutputFile', help=\n 'The name of the %s file to be created.' % Destinate)\n Parser.add_option('-a', '--auto', dest='AutoWrite', action='store_true',\n default=False, help=\n 'Automatically create the %s file using the name of the %s file and replacing file extension'\n % (Source, Destinate))\n Parser.add_option('-q', '--quiet', action='store_true', type=None, help\n ='Disable all messages except FATAL ERRORS.')\n Parser.add_option('-v', '--verbose', action='store_true', type=None,\n help='Turn on verbose output with informational messages printed.')\n Options, Args = Parser.parse_args()\n if Options.verbose:\n EdkLogger.setLevel(EdkLogger.VERBOSE)\n elif Options.quiet:\n EdkLogger.setLevel(EdkLogger.QUIET)\n else:\n EdkLogger.setLevel(EdkLogger.INFO)\n if len(Args) == 0:\n raise MigrationError(PARAMETER_MISSING, name='Input file', usage=\n Parser.get_usage())\n if len(Args) > 1:\n raise MigrationError(PARAMETER_INVALID, name='Too many input files',\n usage=Parser.get_usage())\n InputFile = Args[0]\n if not os.path.exists(InputFile):\n raise MigrationError(FILE_NOT_FOUND, name=InputFile)\n if Options.OutputFile:\n if Options.AutoWrite:\n raise MigrationError(OPTION_CONFLICT, arg1='-o', arg2='-a',\n usage=Parser.get_usage())\n elif Options.AutoWrite:\n Options.OutputFile = os.path.splitext(InputFile)[0\n ] + '.' + Destinate.lower()\n else:\n raise MigrationError(OPTION_MISSING, name='-o', usage=Parser.\n get_usage())\n return Options, InputFile\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "## @file\n# Contains several utilitities shared by migration tools.\n#\n# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>\n# This program and the accompanying materials\n# are licensed and made available under the terms and conditions of the BSD License\n# which accompanies this distribution. The full text of the license may be found at\n# http://opensource.org/licenses/bsd-license.php\n#\n# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\n#\n\n##\n# Import Modules\n#\nimport Common.LongFilePathOs as os\nimport re\nimport EdkLogger\nfrom optparse import OptionParser\nfrom Common.BuildToolError import *\nfrom XmlRoutines import *\nfrom CommonDataClass.CommonClass import *\nfrom Common.LongFilePathSupport import OpenLongFilePath as open\n\n## Set all fields of CommonClass object.\n#\n# Set all attributes of CommonClass object from XML Dom object of XmlCommon.\n#\n# @param Common The destine CommonClass object.\n# @param XmlCommon The source XML Dom object.\n#\ndef SetCommon(Common, XmlCommon):\n XmlTag = \"Usage\"\n Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()\n\n XmlTag = \"FeatureFlag\"\n Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)\n\n XmlTag = \"SupArchList\"\n Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()\n\n XmlTag = XmlNodeName(XmlCommon) + \"/\" + \"HelpText\"\n Common.HelpText = XmlElement(XmlCommon, XmlTag)\n\n\n## Set some fields of CommonHeaderClass object.\n#\n# Set Name, Guid, FileName and FullPath fields of CommonHeaderClass object from\n# XML Dom object of XmlCommonHeader, NameTag and FileName.\n#\n# @param CommonHeader The destine CommonClass object.\n# @param XmlCommonHeader The source XML Dom object.\n# @param NameTag The name tag in XML Dom object.\n# @param FileName The file name of the XML file.\n#\ndef SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):\n 
XmlParentTag = XmlNodeName(XmlCommonHeader)\n\n XmlTag = XmlParentTag + \"/\" + NameTag\n CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)\n\n XmlTag = XmlParentTag + \"/\" + \"GuidValue\"\n CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)\n\n XmlTag = XmlParentTag + \"/\" + \"Version\"\n CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)\n\n CommonHeader.FileName = os.path.basename(FileName)\n CommonHeader.FullPath = os.path.abspath(FileName)\n\n\n## Regular expression to match specification and value.\nmReSpecification = re.compile(r\"(?P<Specification>\\w+)\\s+(?P<Value>\\w*)\")\n\n## Add specification to specification dictionary.\n#\n# Abstract specification name, value pair from Specification String and add them\n# to specification dictionary.\n#\n# @param SpecificationDict The destine Specification dictionary.\n# @param SpecificationString The source Specification String from which the\n# specification name and value pair is abstracted.\n#\ndef AddToSpecificationDict(SpecificationDict, SpecificationString):\n \"\"\"Abstract specification name, value pair from Specification String\"\"\"\n for SpecificationMatch in mReSpecification.finditer(SpecificationString):\n Specification = SpecificationMatch.group(\"Specification\")\n Value = SpecificationMatch.group(\"Value\")\n SpecificationDict[Specification] = Value\n\n## Set all fields of CommonHeaderClass object.\n#\n# Set all attributes of CommonHeaderClass object from XML Dom object of\n# XmlCommonHeader, NameTag and FileName.\n#\n# @param CommonHeader The destine CommonClass object.\n# @param XmlCommonHeader The source XML Dom object.\n# @param NameTag The name tag in XML Dom object.\n# @param FileName The file name of the XML file.\n#\ndef SetCommonHeader(CommonHeader, XmlCommonHeader):\n \"\"\"Set all attributes of CommonHeaderClass object from XmlCommonHeader\"\"\"\n XmlParent = XmlNodeName(XmlCommonHeader)\n\n XmlTag = XmlParent + \"/\" + \"Abstract\"\n CommonHeader.Abstract = 
XmlElement(XmlCommonHeader, XmlTag)\n\n XmlTag = XmlParent + \"/\" + \"Description\"\n CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)\n\n XmlTag = XmlParent + \"/\" + \"Copyright\"\n CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)\n\n XmlTag = XmlParent + \"/\" + \"License\"\n CommonHeader.License = XmlElement(XmlCommonHeader, XmlTag)\n\n XmlTag = XmlParent + \"/\" + \"Specification\"\n Specification = XmlElement(XmlCommonHeader, XmlTag)\n\n AddToSpecificationDict(CommonHeader.Specification, Specification)\n\n XmlTag = XmlParent + \"/\" + \"ModuleType\"\n CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)\n\n\n## Load a new Cloned Record class object.\n#\n# Read an input XML ClonedRecord DOM object and return an object of Cloned Record\n# contained in the DOM object.\n#\n# @param XmlCloned A child XML DOM object in a Common XML DOM.\n#\n# @retvel ClonedRecord A new Cloned Record object created by XmlCloned.\n#\ndef LoadClonedRecord(XmlCloned):\n ClonedRecord = ClonedRecordClass()\n\n XmlTag = \"Id\"\n ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))\n\n XmlTag = \"FarGuid\"\n ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)\n\n XmlTag = \"Cloned/PackageGuid\"\n ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)\n\n XmlTag = \"Cloned/PackageVersion\"\n ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)\n\n XmlTag = \"Cloned/ModuleGuid\"\n ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)\n\n XmlTag = \"Cloned/ModuleVersion\"\n ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)\n\n return ClonedRecord\n\n\n## Load a new Guid/Protocol/Ppi common class object.\n#\n# Read an input XML Guid/Protocol/Ppi DOM object and return an object of\n# Guid/Protocol/Ppi contained in the DOM object.\n#\n# @param XmlGuidProtocolPpiCommon A child XML DOM object in a Common XML DOM.\n#\n# @retvel GuidProtocolPpiCommon A new GuidProtocolPpiCommon class object\n# created by XmlGuidProtocolPpiCommon.\n#\ndef 
LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):\n GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()\n\n XmlTag = \"Name\"\n GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n\n XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)\n if XmlParent == \"Entry\":\n XmlTag = \"%s/C_Name\" % XmlParent\n elif XmlParent == \"GuidCNames\":\n XmlTag = \"%s/GuidCName\" % XmlParent\n else:\n XmlTag = \"%s/%sCName\" % (XmlParent, XmlParent)\n\n GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n\n XmlTag = XmlParent + \"/\" + \"GuidValue\"\n GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)\n\n if XmlParent.endswith(\"Notify\"):\n GuidProtocolPpiCommon.Notify = True\n\n XmlTag = \"GuidTypeList\"\n GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()\n\n XmlTag = \"SupModuleList\"\n SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)\n GuidProtocolPpiCommon.SupModuleList = SupModules.split()\n\n SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)\n\n return GuidProtocolPpiCommon\n\n\n## Load a new Pcd class object.\n#\n# Read an input XML Pcd DOM object and return an object of Pcd\n# contained in the DOM object.\n#\n# @param XmlPcd A child XML DOM object in a Common XML DOM.\n#\n# @retvel Pcd A new Pcd object created by XmlPcd.\n#\ndef LoadPcd(XmlPcd):\n \"\"\"Return a new PcdClass object equivalent to XmlPcd\"\"\"\n Pcd = PcdClass()\n\n XmlTag = \"PcdEntry/C_Name\"\n Pcd.CName = XmlElement(XmlPcd, XmlTag)\n\n XmlTag = \"PcdEntry/Token\"\n Pcd.Token = XmlElement(XmlPcd, XmlTag)\n\n XmlTag = \"PcdEntry/TokenSpaceGuidCName\"\n Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)\n\n XmlTag = \"PcdEntry/DatumType\"\n Pcd.DatumType = XmlElement(XmlPcd, XmlTag)\n\n XmlTag = \"PcdEntry/MaxDatumSize\"\n Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)\n\n XmlTag = \"PcdEntry/DefaultValue\"\n Pcd.DefaultValue = XmlElement(XmlPcd, 
XmlTag)\n\n XmlTag = \"PcdItemType\"\n Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)\n\n XmlTag = \"PcdEntry/ValidUsage\"\n Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()\n\n XmlTag = \"SupModuleList\"\n Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()\n\n SetCommon(Pcd, XmlPcd)\n\n return Pcd\n\n\n## Load a new LibraryClass class object.\n#\n# Read an input XML LibraryClass DOM object and return an object of LibraryClass\n# contained in the DOM object.\n#\n# @param XmlLibraryClass A child XML DOM object in a Common XML DOM.\n#\n# @retvel LibraryClass A new LibraryClass object created by XmlLibraryClass.\n#\ndef LoadLibraryClass(XmlLibraryClass):\n LibraryClass = LibraryClassClass()\n\n XmlTag = \"LibraryClass/Keyword\"\n LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)\n if LibraryClass.LibraryClass == \"\":\n XmlTag = \"Name\"\n LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)\n\n XmlTag = \"LibraryClass/IncludeHeader\"\n LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)\n\n XmlTag = \"RecommendedInstanceVersion\"\n RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion\n\n XmlTag = \"RecommendedInstanceGuid\"\n RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid\n\n XmlTag = \"SupModuleList\"\n SupModules = XmlAttribute(XmlLibraryClass, XmlTag)\n LibraryClass.SupModuleList = SupModules.split()\n\n SetCommon(LibraryClass, XmlLibraryClass)\n\n return LibraryClass\n\n\n## Load a new Build Option class object.\n#\n# Read an input XML BuildOption DOM object and return an object of Build Option\n# contained in the DOM object.\n#\n# @param XmlBuildOption A child XML DOM object in a Common XML DOM.\n#\n# @retvel BuildOption A new Build Option object created by XmlBuildOption.\n#\ndef LoadBuildOption(XmlBuildOption):\n \"\"\"Return a new BuildOptionClass 
object equivalent to XmlBuildOption\"\"\"\n BuildOption = BuildOptionClass()\n\n BuildOption.Option = XmlElementData(XmlBuildOption)\n\n XmlTag = \"BuildTargets\"\n BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()\n\n XmlTag = \"ToolChainFamily\"\n BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)\n\n XmlTag = \"TagName\"\n BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)\n\n XmlTag = \"ToolCode\"\n BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)\n\n XmlTag = \"SupArchList\"\n BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()\n\n return BuildOption\n\n\n## Load a new User Extensions class object.\n#\n# Read an input XML UserExtensions DOM object and return an object of User\n# Extensions contained in the DOM object.\n#\n# @param XmlUserExtensions A child XML DOM object in a Common XML DOM.\n#\n# @retvel UserExtensions A new User Extensions object created by\n# XmlUserExtensions.\n#\ndef LoadUserExtensions(XmlUserExtensions):\n UserExtensions = UserExtensionsClass()\n\n XmlTag = \"UserID\"\n UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)\n\n XmlTag = \"Identifier\"\n UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)\n\n UserExtensions.Content = XmlElementData(XmlUserExtensions)\n\n return UserExtensions\n\n\n## Store content to a text file object.\n#\n# Write some text file content to a text file object. 
The contents may echo\n# in screen in a verbose way.\n#\n# @param TextFile The text file object.\n# @param Content The string object to be written to a text file.\n#\ndef StoreTextFile(TextFile, Content):\n EdkLogger.verbose(Content)\n TextFile.write(Content)\n\n\n## Add item to a section.\n#\n# Add an Item with specific CPU architecture to section dictionary.\n# The possible duplication is ensured to be removed.\n#\n# @param Section Section dictionary indexed by CPU architecture.\n# @param Arch CPU architecture: Ia32, X64, Ipf, ARM, AARCH64, Ebc or Common.\n# @param Item The Item to be added to section dictionary.\n#\ndef AddToSection(Section, Arch, Item):\n SectionArch = Section.get(Arch, [])\n if Item not in SectionArch:\n SectionArch.append(Item)\n Section[Arch] = SectionArch\n\n\n## Get section contents.\n#\n# Return the content of section named SectionName.\n# the contents is based on Methods and ObjectLists.\n#\n# @param SectionName The name of the section.\n# @param Method A function returning a string item of an object.\n# @param ObjectList The list of object.\n#\n# @retval Section The string content of a section.\n#\ndef GetSection(SectionName, Method, ObjectList):\n SupportedArches = [\"common\", \"Ia32\", \"X64\", \"Ipf\", \"Ebc\", \"ARM\", \"AARCH64\"]\n SectionDict = {}\n for Object in ObjectList:\n Item = Method(Object)\n if Item == \"\":\n continue\n Item = \" %s\" % Item\n Arches = Object.SupArchList\n if len(Arches) == 0:\n AddToSection(SectionDict, \"common\", Item)\n else:\n for Arch in SupportedArches:\n if Arch.upper() in Arches:\n AddToSection(SectionDict, Arch, Item)\n\n Section = \"\"\n for Arch in SupportedArches:\n SectionArch = \"\\n\".join(SectionDict.get(Arch, []))\n if SectionArch != \"\":\n Section += \"[%s.%s]\\n%s\\n\" % (SectionName, Arch, SectionArch)\n Section += \"\\n\"\n if Section != \"\":\n Section += \"\\n\"\n return Section\n\n\n## Store file header to a text file.\n#\n# Write standard file header to a text file. 
The content includes copyright,\n# abstract, description and license extracted from CommonHeader class object.\n#\n# @param TextFile The text file object.\n# @param CommonHeader The source CommonHeader class object.\n#\ndef StoreHeader(TextFile, CommonHeader):\n CopyRight = CommonHeader.Copyright\n Abstract = CommonHeader.Abstract\n Description = CommonHeader.Description\n License = CommonHeader.License\n\n Header = \"#/** @file\\n#\\n\"\n Header += \"# \" + Abstract + \"\\n#\\n\"\n Header += \"# \" + Description.strip().replace(\"\\n\", \"\\n# \") + \"\\n\"\n Header += \"# \" + CopyRight + \"\\n#\\n\"\n Header += \"# \" + License.replace(\"\\n\", \"\\n# \").replace(\" \", \" \")\n Header += \"\\n#\\n#**/\\n\\n\"\n\n StoreTextFile(TextFile, Header)\n\n## Store file header to a text file.\n#\n# Write Defines section to a text file. DefinesTupleList determines the content.\n#\n# @param TextFile The text file object.\n# @param DefinesTupleList The list of (Tag, Value) to be added as one item.\n#\ndef StoreDefinesSection(TextFile, DefinesTupleList):\n Section = \"[Defines]\\n\"\n for DefineItem in DefinesTupleList:\n Section += \" %-30s = %s\\n\" % DefineItem\n\n Section += \"\\n\\n\"\n StoreTextFile(TextFile, Section)\n\n\n## Return one User Extension section.\n#\n# Read the input UserExtentsions class object and return one section.\n#\n# @param UserExtensions An input UserExtensions class object.\n#\n# @retval UserExtensionSection A section representing UserExtensions object.\n#\ndef GetUserExtensions(UserExtensions):\n UserId = UserExtensions.UserID\n Identifier = UserExtensions.Identifier\n Content = UserExtensions.Content\n\n return \"[UserExtensions.%s.%s]\\n %s\\n\\n\" % (UserId, Identifier, Content)\n\n## Regular expression to match an equation.\nmReEquation = re.compile(r\"\\s*(\\S+)\\s*=\\s*(\\S*)\\s*\")\n\n## Return a value tuple matching information in a text fle.\n#\n# Parse the text file and return a value tuple corresponding to an input tag\n# tuple. 
In case of any error, an tuple of empty strings is returned.\n#\n# @param FileName The file name of the text file.\n# @param TagTuple A tuple of tags as the key to the value.\n#\n# @param ValueTupe The returned tuple corresponding to the tag tuple.\n#\ndef GetTextFileInfo(FileName, TagTuple):\n ValueTuple = [\"\"] * len(TagTuple)\n try:\n for Line in open(FileName):\n Line = Line.split(\"#\", 1)[0]\n MatchEquation = mReEquation.match(Line)\n if MatchEquation:\n Tag = MatchEquation.group(1).upper()\n Value = MatchEquation.group(2)\n for Index in range(len(TagTuple)):\n if TagTuple[Index] == Tag:\n ValueTuple[Index] = Value\n except:\n EdkLogger.info(\"IO Error in reading file %s\" % FileName)\n\n return ValueTuple\n\n\n## Return a value tuple matching information in an XML fle.\n#\n# Parse the XML file and return a value tuple corresponding to an input tag\n# tuple. In case of any error, an tuple of empty strings is returned.\n#\n# @param FileName The file name of the XML file.\n# @param TagTuple A tuple of tags as the key to the value.\n#\n# @param ValueTupe The returned tuple corresponding to the tag tuple.\n#\ndef GetXmlFileInfo(FileName, TagTuple):\n XmlDom = XmlParseFile(FileName)\n return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])\n\n\n## Parse migration command line options\n#\n# Use standard Python module optparse to parse command line option of this tool.\n#\n# @param Source The source file type.\n# @param Destinate The destinate file type.\n#\n# @retval Options A optparse object containing the parsed options.\n# @retval InputFile Path of an source file to be migrated.\n#\ndef MigrationOptionParser(Source, Destinate, ToolName, VersionNumber = 1.0):\n # use clearer usage to override default usage message\n UsageString = \"%s [-a] [-v|-q] [-o <output_file>] <input_file>\" % ToolName\n Version = \"%s Version %.2f\" % (ToolName, VersionNumber)\n Copyright = \"Copyright (c) 2007, Intel Corporation. 
All rights reserved.\"\n\n Parser = OptionParser(description=Copyright, version=Version, usage=UsageString)\n Parser.add_option(\"-o\", \"--output\", dest=\"OutputFile\", help=\"The name of the %s file to be created.\" % Destinate)\n Parser.add_option(\"-a\", \"--auto\", dest=\"AutoWrite\", action=\"store_true\", default=False, help=\"Automatically create the %s file using the name of the %s file and replacing file extension\" % (Source, Destinate))\n Parser.add_option(\"-q\", \"--quiet\", action=\"store_true\", type=None, help=\"Disable all messages except FATAL ERRORS.\")\n Parser.add_option(\"-v\", \"--verbose\", action=\"store_true\", type=None, help=\"Turn on verbose output with informational messages printed.\")\n\n Options, Args = Parser.parse_args()\n\n # Set logging level\n if Options.verbose:\n EdkLogger.setLevel(EdkLogger.VERBOSE)\n elif Options.quiet:\n EdkLogger.setLevel(EdkLogger.QUIET)\n else:\n EdkLogger.setLevel(EdkLogger.INFO)\n\n # error check\n if len(Args) == 0:\n raise MigrationError(PARAMETER_MISSING, name=\"Input file\", usage=Parser.get_usage())\n if len(Args) > 1:\n raise MigrationError(PARAMETER_INVALID, name=\"Too many input files\", usage=Parser.get_usage())\n\n InputFile = Args[0]\n if not os.path.exists(InputFile):\n raise MigrationError(FILE_NOT_FOUND, name=InputFile)\n\n if Options.OutputFile:\n if Options.AutoWrite:\n raise MigrationError(OPTION_CONFLICT, arg1=\"-o\", arg2=\"-a\", usage=Parser.get_usage())\n else:\n if Options.AutoWrite:\n Options.OutputFile = os.path.splitext(InputFile)[0] + \".\" + Destinate.lower()\n else:\n raise MigrationError(OPTION_MISSING, name=\"-o\", usage=Parser.get_usage())\n\n return Options, InputFile\n\n# This acts like the main() function for the script, unless it is 'import'ed\n# into another script.\nif __name__ == '__main__':\n pass\n",
"step-ids": [
11,
18,
20,
21,
23
]
}
|
[
11,
18,
20,
21,
23
] |
import sys
import os
# Module "sys"
#
# See docs for the sys module: https://docs.python.org/3.7/library/sys.html
# Print out the command line arguments in sys.argv, one per line:
# Print out the plaform from sys:
# for arg in sys.argv:
# print(arg)
# Print out the Python version from sys:print(sys.platform)
# print(sys, sep="\n", sys.path)
print("platform: "+sys.platform + "\n" + "maxsize: "+str(sys.maxsize) + "\n" + "argv: "+str(sys.argv))
# # Module "os"
# #
# # See the docs for the OS module: https://docs.python.org/3.7/library/os.html
# # Print the current process ID
print("Process ID: "+ str(os.getpid()) + "\n" + "cwd: " + os.getcwd() + "\n" + "login id: " + os.getlogin())
# # Print the current working directory (cwd):
# print()
# # Print your login name
# print()
|
normal
|
{
"blob_id": "3fed96e9bedb157a14cf9c441de5aae8b4f6edc8",
"index": 8664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('platform: ' + sys.platform + '\\n' + 'maxsize: ' + str(sys.maxsize) +\n '\\n' + 'argv: ' + str(sys.argv))\nprint('Process ID: ' + str(os.getpid()) + '\\n' + 'cwd: ' + os.getcwd() +\n '\\n' + 'login id: ' + os.getlogin())\n",
"step-3": "import sys\nimport os\nprint('platform: ' + sys.platform + '\\n' + 'maxsize: ' + str(sys.maxsize) +\n '\\n' + 'argv: ' + str(sys.argv))\nprint('Process ID: ' + str(os.getpid()) + '\\n' + 'cwd: ' + os.getcwd() +\n '\\n' + 'login id: ' + os.getlogin())\n",
"step-4": "import sys\nimport os\n\n# Module \"sys\"\n#\n# See docs for the sys module: https://docs.python.org/3.7/library/sys.html\n\n# Print out the command line arguments in sys.argv, one per line:\n\n\n# Print out the plaform from sys:\n# for arg in sys.argv:\n# print(arg)\n\n\n# Print out the Python version from sys:print(sys.platform)\n# print(sys, sep=\"\\n\", sys.path)\nprint(\"platform: \"+sys.platform + \"\\n\" + \"maxsize: \"+str(sys.maxsize) + \"\\n\" + \"argv: \"+str(sys.argv))\n\n\n\n\n# # Module \"os\"\n# #\n# # See the docs for the OS module: https://docs.python.org/3.7/library/os.html\n\n# # Print the current process ID\nprint(\"Process ID: \"+ str(os.getpid()) + \"\\n\" + \"cwd: \" + os.getcwd() + \"\\n\" + \"login id: \" + os.getlogin())\n\n# # Print the current working directory (cwd):\n# print()\n\n# # Print your login name\n# print()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-04-12 12:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cstasker', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='usertask',
name='ut_id',
field=models.BigIntegerField(primary_key=True, serialize=False),
),
]
|
normal
|
{
"blob_id": "2fbf312e1f8388008bb9ab9ba0ee4ccee1a8beae",
"index": 3594,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cstasker', '0001_initial')]\n operations = [migrations.AlterField(model_name='usertask', name='ut_id',\n field=models.BigIntegerField(primary_key=True, serialize=False))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cstasker', '0001_initial')]\n operations = [migrations.AlterField(model_name='usertask', name='ut_id',\n field=models.BigIntegerField(primary_key=True, serialize=False))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.8 on 2018-04-12 12:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cstasker', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='usertask',\n name='ut_id',\n field=models.BigIntegerField(primary_key=True, serialize=False),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Python 3.6.8 (tags/v3.6.8:3c6b436a57, Dec 24 2018, 00:16:47) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import turtle
turtle.setup(650,350,200,200)
turtle.penup()
turtle.fd(-250)
turtle.pendown()
turtle.pensize(25)
turtle.pencolor("purple")
turtle.seth(-40)
for i in range(4):
turtle.circle(40,80)
turtle.circle(-40,80)
turtle.circle(40,80/2)
turtle.fd(40)
turtle.circle(16,180)
turtle.fd(40 * 2/3)
turtle.done()
|
normal
|
{
"blob_id": "e069ad88b5173e5859f1b01b9fb45951d1e82593",
"index": 4280,
"step-1": "Python 3.6.8 (tags/v3.6.8:3c6b436a57, Dec 24 2018, 00:16:47) [MSC v.1916 64 bit (AMD64)] on win32\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\n>>> import turtle \nturtle.setup(650,350,200,200)\nturtle.penup() \nturtle.fd(-250) \nturtle.pendown() \nturtle.pensize(25) \nturtle.pencolor(\"purple\") \nturtle.seth(-40) \nfor i in range(4): \n turtle.circle(40,80) \n turtle.circle(-40,80)\nturtle.circle(40,80/2)\nturtle.fd(40)\nturtle.circle(16,180)\nturtle.fd(40 * 2/3)\nturtle.done()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
# @Time : 2018/12/13 21:32
# @Author : sundongjian
# @Email : [email protected]
# @File : __init__.py.py
# @Software: PyCharm
|
normal
|
{
"blob_id": "00ec56420831d8f4ab14259c7b07f1be0bcb7d78",
"index": 9161,
"step-1": "# -*- coding: utf-8 -*-\r\n# @Time : 2018/12/13 21:32\r\n# @Author : sundongjian\r\n# @Email : [email protected]\r\n# @File : __init__.py.py\r\n# @Software: PyCharm",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
# Copyright (c) 2012 - Samuel Loretan <tynril at gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import urllib2
try:
import json
except ImportError:
import simplejson as json
class Gw2Spidy:
"""This utility class allows easy access to the GW2Spidy data."""
headers = {'User-Agent': 'gw2spidy.py'}
@staticmethod
def getTypesList():
"""Get a list of item types and subtypes."""
return Gw2Spidy._request('types')['results']
@staticmethod
def getDisciplinesList():
"""Get a list of crafting disciplines."""
return Gw2Spidy._request('disciplines')['results']
@staticmethod
def getRaritiesList():
"""Get a list of item rarities."""
return Gw2Spidy._request('rarities')['results']
@staticmethod
def getAllItemsList():
"""Get a list of all items."""
return Gw2Spidy._request('all-items', 'all')['results']
@staticmethod
def getItemsOfType(typeId):
"""Get a list of all items of a certain type."""
return Gw2Spidy._request('all-items', str(typeId))['results']
@staticmethod
def getItemData(itemId):
"""Get the data of a particular item. High frequency of update."""
return Gw2Spidy._request('item', str(itemId))['result']
@staticmethod
def getItemBuyListings(itemId, allPages = False):
"""Get a list of all buy offers for a certain item."""
return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'buy')
@staticmethod
def getItemSellListings(itemId, allPages = False):
"""Get a list of all sell offers for a certain item."""
return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'sell')
@staticmethod
def searchItems(name, allPages = False):
"""Search items by name. Might be slow, not recommended."""
return Gw2Spidy._paginatedRequest(allPages, 'item-search', name)
@staticmethod
def getAllRecipesList(allPages = False):
"""Get a list of all crafting recipes."""
return Gw2Spidy._paginatedRequest(allPages, 'recipes', 'all')
@staticmethod
def getRecipesOfDiscipline(disciplineId, allPages = False):
"""Get a list of all crafting recipes for a certain discipline."""
return Gw2Spidy._paginatedRequest(allPages, 'recipes', str(disciplineId))
@staticmethod
def getRecipeData(recipeId):
"""Get the data of a particular recipe."""
return Gw2Spidy._request('recipe', str(recipeId))
@staticmethod
def getGemPrice():
"""Get the current gem/gold conversion rate."""
return Gw2Spidy._request('gem-price')
@staticmethod
def _paginatedRequest(allPages, *args):
"""Handle paginated requests, downloading all pages if requested."""
data = []
currentPage = 0
while True:
newData = Gw2Spidy._request(*(args + (str(currentPage),)))
if not allPages:
return newData['results']
data.extend(newData['results'])
currentPage = currentPage + 1
if newData['page'] == newData['last_page']:
break
return data
@staticmethod
def _request(*args):
"""Makes a request on the GW2Spidy API."""
url = 'http://www.gw2spidy.com/api/v0.9/json/' + '/'.join(args)
r = urllib2.Request(url, headers=Gw2Spidy.headers)
if 'Cookie' not in Gw2Spidy.headers:
resp = urllib2.urlopen(r)
if 'set-cookie' in resp.headers:
Gw2Spidy.headers['Cookie'] = resp.headers['set-cookie'].split(';', 1)[0]
return json.loads(resp.read())
return json.loads(urllib2.urlopen(r).read())
|
normal
|
{
"blob_id": "109a0ba0952bd5923ecbefa41556de7aa9f9eea8",
"index": 4197,
"step-1": "# Copyright (c) 2012 - Samuel Loretan <tynril at gmail.com>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport urllib2\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\nclass Gw2Spidy:\n \"\"\"This utility class allows easy access to the GW2Spidy data.\"\"\"\n headers = {'User-Agent': 'gw2spidy.py'}\n\n @staticmethod\n def getTypesList():\n \"\"\"Get a list of item types and subtypes.\"\"\"\n return Gw2Spidy._request('types')['results']\n\n @staticmethod\n def getDisciplinesList():\n \"\"\"Get a list of crafting disciplines.\"\"\"\n return Gw2Spidy._request('disciplines')['results']\n\n @staticmethod\n def getRaritiesList():\n \"\"\"Get a list of item rarities.\"\"\"\n return Gw2Spidy._request('rarities')['results']\n\n @staticmethod\n def getAllItemsList():\n \"\"\"Get a list of all items.\"\"\"\n return Gw2Spidy._request('all-items', 'all')['results']\n\n @staticmethod\n def getItemsOfType(typeId):\n \"\"\"Get a 
list of all items of a certain type.\"\"\"\n return Gw2Spidy._request('all-items', str(typeId))['results']\n\n @staticmethod\n def getItemData(itemId):\n \"\"\"Get the data of a particular item. High frequency of update.\"\"\"\n return Gw2Spidy._request('item', str(itemId))['result']\n\n @staticmethod\n def getItemBuyListings(itemId, allPages = False):\n \"\"\"Get a list of all buy offers for a certain item.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'buy')\n\n @staticmethod\n def getItemSellListings(itemId, allPages = False):\n \"\"\"Get a list of all sell offers for a certain item.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'listings', str(itemId), 'sell')\n\n @staticmethod\n def searchItems(name, allPages = False):\n \"\"\"Search items by name. Might be slow, not recommended.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'item-search', name)\n\n @staticmethod\n def getAllRecipesList(allPages = False):\n \"\"\"Get a list of all crafting recipes.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'recipes', 'all')\n\n @staticmethod\n def getRecipesOfDiscipline(disciplineId, allPages = False):\n \"\"\"Get a list of all crafting recipes for a certain discipline.\"\"\"\n return Gw2Spidy._paginatedRequest(allPages, 'recipes', str(disciplineId))\n\n @staticmethod\n def getRecipeData(recipeId):\n \"\"\"Get the data of a particular recipe.\"\"\"\n return Gw2Spidy._request('recipe', str(recipeId))\n\n @staticmethod\n def getGemPrice():\n \"\"\"Get the current gem/gold conversion rate.\"\"\"\n return Gw2Spidy._request('gem-price')\n\n @staticmethod\n def _paginatedRequest(allPages, *args):\n \"\"\"Handle paginated requests, downloading all pages if requested.\"\"\"\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == 
newData['last_page']:\n break\n return data\n\n @staticmethod\n def _request(*args):\n \"\"\"Makes a request on the GW2Spidy API.\"\"\"\n url = 'http://www.gw2spidy.com/api/v0.9/json/' + '/'.join(args)\n\tr = urllib2.Request(url, headers=Gw2Spidy.headers)\n\tif 'Cookie' not in Gw2Spidy.headers:\n\t resp = urllib2.urlopen(r)\n\t if 'set-cookie' in resp.headers:\n\t\tGw2Spidy.headers['Cookie'] = resp.headers['set-cookie'].split(';', 1)[0]\n\t return json.loads(resp.read())\n return json.loads(urllib2.urlopen(r).read())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import torch
from torch import nn
import pytorch_ssim
class Custom_Loss_for_Autoencoder(nn.Module):
def __init__(self, window_size=6):
super(Custom_Loss_for_Autoencoder, self).__init__()
self.ssim = pytorch_ssim.SSIM(window_size=window_size)
self.mse = nn.MSELoss()
def forward(self, reconstructed_images, images):
l1 = self.mse(reconstructed_images, images)
l2 = self.ssim(reconstructed_images, images)
return l1 - l2
|
normal
|
{
"blob_id": "ce3e2aa2534bb404b45202bcb76e9d07080560cb",
"index": 2739,
"step-1": "<mask token>\n\n\nclass Custom_Loss_for_Autoencoder(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Custom_Loss_for_Autoencoder(nn.Module):\n <mask token>\n\n def forward(self, reconstructed_images, images):\n l1 = self.mse(reconstructed_images, images)\n l2 = self.ssim(reconstructed_images, images)\n return l1 - l2\n",
"step-3": "<mask token>\n\n\nclass Custom_Loss_for_Autoencoder(nn.Module):\n\n def __init__(self, window_size=6):\n super(Custom_Loss_for_Autoencoder, self).__init__()\n self.ssim = pytorch_ssim.SSIM(window_size=window_size)\n self.mse = nn.MSELoss()\n\n def forward(self, reconstructed_images, images):\n l1 = self.mse(reconstructed_images, images)\n l2 = self.ssim(reconstructed_images, images)\n return l1 - l2\n",
"step-4": "import torch\nfrom torch import nn\nimport pytorch_ssim\n\n\nclass Custom_Loss_for_Autoencoder(nn.Module):\n\n def __init__(self, window_size=6):\n super(Custom_Loss_for_Autoencoder, self).__init__()\n self.ssim = pytorch_ssim.SSIM(window_size=window_size)\n self.mse = nn.MSELoss()\n\n def forward(self, reconstructed_images, images):\n l1 = self.mse(reconstructed_images, images)\n l2 = self.ssim(reconstructed_images, images)\n return l1 - l2\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from flask import Flask
from flask import render_template
from flask import make_response
import json
from lib import powerswitch
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/on/')
def on():
state = powerswitch.on()
return json.dumps(state)
@app.route('/off/')
def off():
state = powerswitch.off()
return json.dumps(state)
@app.route('/toggle/')
def toggle():
state = powerswitch.toggle()
return json.dumps(state)
@app.route('/state/')
def state():
state = powerswitch.state()
return json.dumps(state)
if __name__ == "__main__":
powerswitch.on()
app.run(host='0.0.0.0', port=80, debug=True)
|
normal
|
{
"blob_id": "18d3f58048b7e5d792eb2494ecc62bb158ac7407",
"index": 254,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef hello_world():\n return render_template('index.html')\n\n\n<mask token>\n\n\[email protected]('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\[email protected]('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\[email protected]('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef hello_world():\n return render_template('index.html')\n\n\[email protected]('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n\[email protected]('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\[email protected]('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\[email protected]('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello_world():\n return render_template('index.html')\n\n\[email protected]('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n\[email protected]('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\[email protected]('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\[email protected]('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n",
"step-4": "from flask import Flask\nfrom flask import render_template\nfrom flask import make_response\nimport json\nfrom lib import powerswitch\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello_world():\n return render_template('index.html')\n\n\[email protected]('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\n\[email protected]('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\n\[email protected]('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\n\[email protected]('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\n\nif __name__ == '__main__':\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n",
"step-5": "from flask import Flask\nfrom flask import render_template\nfrom flask import make_response\n\nimport json\n\nfrom lib import powerswitch\n\napp = Flask(__name__)\n\[email protected]('/')\ndef hello_world():\n return render_template('index.html')\n\[email protected]('/on/')\ndef on():\n state = powerswitch.on()\n return json.dumps(state)\n\[email protected]('/off/')\ndef off():\n state = powerswitch.off()\n return json.dumps(state)\n\[email protected]('/toggle/')\ndef toggle():\n state = powerswitch.toggle()\n return json.dumps(state)\n\[email protected]('/state/')\ndef state():\n state = powerswitch.state()\n return json.dumps(state)\n\nif __name__ == \"__main__\":\n powerswitch.on()\n app.run(host='0.0.0.0', port=80, debug=True)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import subprocess
import glob
import os
import time
import sys
import xml.etree.ElementTree as ET
import getpass
import psutil
if len(sys.argv)==1:
photoscanname = r"C:\Program Files\Agisoft\PhotoScan Pro\photoscan.exe"
scriptname = r"C:\Users\slocumr\github\SimUAS\batchphotoscan\agiproc.py"
#xmlnames = r"P:\Slocum\USVI_project\01_DATA\20180319_USVI_UAS_BATHY\02_PROCDATA\06_PROCIMAGES\*\06_QUICKPROC\*2.xml"
xmlnames = r"C:\Users\slocumr\github\SimUAS\data\testagiproc\06_QUICKPROC\*.xml"
nprocesses = 1
else:
photoscanname = sys.argv[1]
scriptname = sys.argv[2]
xmlnames = sys.argv[3]
nprocesses = 1
SLEEPTIME = 10
DODEBUG = True
# get xmlfiles
xmlfiles = glob.glob(xmlnames)
nfiles = len(xmlfiles)
# empty lists
processes = []
procname = []
procind = []
logname = []
currentloghandles = []
currentind = []
proclog = open("simUASagiproc_log.log",'at')
try:
# detect already processed or processing folders
nexist = 0
for i,fname in enumerate(xmlfiles):
rootdir,f = os.path.split(fname)
rootoutput = ET.parse(fname).getroot().find('export').get('rootname')
logname.append( rootdir + "/" + rootoutput + "/autoproc.log" )
procind.append(i)
if os.path.exists(rootdir + "/" + rootoutput + "/autoproc.log"):
nexist = nexist+1
print('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles))
proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles) + '\n')
for fname,i,logfile in zip(xmlfiles,procind,logname):
i = i+1
if not os.path.exists(logfile):
currentind.append(i)
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : START : " + '{:3d}/{:3d}'.format(i,nfiles) + " : " + fname)
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : START : " + '{:3d}/{:3d}'.format(i,nfiles) + " : " + fname + '\n')
foldername,foo = os.path.split(logfile)
if not os.path.exists(foldername):
os.makedirs(foldername)
iloghandle = open(logfile,'wt')
iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
iloghandle.write(getpass.getuser() + "\n")
iloghandle.flush()
currentloghandles.append(iloghandle)
processes.append(subprocess.Popen([photoscanname,"-r",scriptname,fname],stdin=iloghandle, stdout=iloghandle, stderr=iloghandle))
procname.append(fname)
while len(processes)>=nprocesses:
time.sleep(SLEEPTIME)
if DODEBUG:
cpu_percent = psutil.cpu_percent()
ram_percent = psutil.virtual_memory().percent
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\n')
for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):
if p.poll() is not None:
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname)
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname + '\n')
iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
iloghandle.flush()
iloghandle.close()
procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]
currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]
currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]
processes[:] = [p for p in processes if p.poll() is None]
# Wait for everything to finish
while len(processes)>0:
time.sleep(SLEEPTIME)
if DODEBUG:
cpu_percent = psutil.cpu_percent()
ram_percent = psutil.virtual_memory().percent
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\n')
for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):
if p.poll() is not None:
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname)
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : DONE : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + fname + '\n')
iloghandle= log
iloghandle.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + "\n")
iloghandle.flush()
iloghandle.close()
procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]
currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]
currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]
processes[:] = [p for p in processes if p.poll() is None]
except KeyboardInterrupt:
for p, ind, name, iloghandle in zip(processes, currentind, procname, currentloghandles):
print(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : KILL : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + name)
proclog.write(time.strftime("%b %d %Y %H:%M:%S", time.gmtime(time.time())) + " : KILL : " + '{:3d}/{:3d}'.format(ind,nfiles) + " : " + name + '\n')
p.kill()
iloghandle.flush()
iloghandle.close()
time.sleep(0.1)
os.remove(logname[ind-1])
proclog.flush()
proclog.close()
print("Done")
|
normal
|
{
"blob_id": "00f95733505b3e853a76bbdd65439bcb230fa262",
"index": 3345,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\n<mask token>\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + '\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, '-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n 
ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) 
+ ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n",
"step-3": "<mask token>\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\nSLEEPTIME = 10\nDODEBUG = True\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\nproclog = open('simUASagiproc_log.log', 'at')\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + '\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, 
'-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not 
None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n",
"step-4": "import subprocess\nimport glob\nimport os\nimport time\nimport sys\nimport xml.etree.ElementTree as ET\nimport getpass\nimport psutil\nif len(sys.argv) == 1:\n photoscanname = 'C:\\\\Program Files\\\\Agisoft\\\\PhotoScan Pro\\\\photoscan.exe'\n scriptname = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\batchphotoscan\\\\agiproc.py')\n xmlnames = (\n 'C:\\\\Users\\\\slocumr\\\\github\\\\SimUAS\\\\data\\\\testagiproc\\\\06_QUICKPROC\\\\*.xml'\n )\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\nSLEEPTIME = 10\nDODEBUG = True\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\nproclog = open('simUASagiproc_log.log', 'at')\ntry:\n nexist = 0\n for i, fname in enumerate(xmlfiles):\n rootdir, f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append(rootdir + '/' + rootoutput + '/autoproc.log')\n procind.append(i)\n if os.path.exists(rootdir + '/' + rootoutput + '/autoproc.log'):\n nexist = nexist + 1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist, nfiles) + '\\n')\n for fname, i, logfile in zip(xmlfiles, procind, logname):\n i = i + 1\n if not os.path.exists(logfile):\n currentind.append(i)\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' : START : ' + '{:3d}/{:3d}'.format(i, nfiles) +\n ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' : START : ' + '{:3d}/{:3d}'.format(i,\n nfiles) + ' : ' + fname + '\\n')\n foldername, foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n iloghandle = open(logfile, 'wt')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime\n (time.time())) + '\\n')\n iloghandle.write(getpass.getuser() + 
'\\n')\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname, '-r',\n scriptname, fname], stdin=iloghandle, stdout=iloghandle,\n stderr=iloghandle))\n procname.append(fname)\n while len(processes) >= nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.\n format(cpu_percent, ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) +\n ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind,\n procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + ' : DONE : ' +\n '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' +\n fname + '\\n')\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S',\n time.gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.\n poll() is None]\n currentind[:] = [ind for ind, p in zip(currentind,\n processes) if p.poll() is None]\n currentloghandles[:] = [log for log, p in zip(\n currentloghandles, processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n while len(processes) > 0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time(\n ))) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,\n ram_percent))\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(\n time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(\n cpu_percent, 
ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname,\n currentloghandles):\n if p.poll() is not None:\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : DONE : ' + '{:3d}/{:3d}'.format(ind,\n nfiles) + ' : ' + fname)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + ' : DONE : ' + '{:3d}/{:3d}'.\n format(ind, nfiles) + ' : ' + fname + '\\n')\n iloghandle = log\n iloghandle.write(time.strftime('%b %d %Y %H:%M:%S', time.\n gmtime(time.time())) + '\\n')\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n, p in zip(procname, processes) if p.poll() is\n None]\n currentind[:] = [ind for ind, p in zip(currentind, processes) if p.\n poll() is None]\n currentloghandles[:] = [log for log, p in zip(currentloghandles,\n processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname,\n currentloghandles):\n print(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.time())) +\n ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) + ' : ' + name)\n proclog.write(time.strftime('%b %d %Y %H:%M:%S', time.gmtime(time.\n time())) + ' : KILL : ' + '{:3d}/{:3d}'.format(ind, nfiles) +\n ' : ' + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind - 1])\nproclog.flush()\nproclog.close()\nprint('Done')\n",
"step-5": "import subprocess\nimport glob\nimport os\nimport time\nimport sys\nimport xml.etree.ElementTree as ET\nimport getpass\nimport psutil\n\nif len(sys.argv)==1:\n photoscanname = r\"C:\\Program Files\\Agisoft\\PhotoScan Pro\\photoscan.exe\"\n scriptname = r\"C:\\Users\\slocumr\\github\\SimUAS\\batchphotoscan\\agiproc.py\"\n #xmlnames = r\"P:\\Slocum\\USVI_project\\01_DATA\\20180319_USVI_UAS_BATHY\\02_PROCDATA\\06_PROCIMAGES\\*\\06_QUICKPROC\\*2.xml\"\n xmlnames = r\"C:\\Users\\slocumr\\github\\SimUAS\\data\\testagiproc\\06_QUICKPROC\\*.xml\"\n nprocesses = 1\nelse:\n photoscanname = sys.argv[1]\n scriptname = sys.argv[2]\n xmlnames = sys.argv[3]\n nprocesses = 1\n\nSLEEPTIME = 10\nDODEBUG = True\n\n# get xmlfiles\nxmlfiles = glob.glob(xmlnames)\nnfiles = len(xmlfiles)\n\n# empty lists\nprocesses = []\nprocname = []\nprocind = []\nlogname = []\ncurrentloghandles = []\ncurrentind = []\n\nproclog = open(\"simUASagiproc_log.log\",'at')\ntry:\n # detect already processed or processing folders\n nexist = 0\n for i,fname in enumerate(xmlfiles):\n rootdir,f = os.path.split(fname)\n rootoutput = ET.parse(fname).getroot().find('export').get('rootname')\n logname.append( rootdir + \"/\" + rootoutput + \"/autoproc.log\" )\n procind.append(i)\n if os.path.exists(rootdir + \"/\" + rootoutput + \"/autoproc.log\"):\n nexist = nexist+1\n print('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles))\n proclog.write('{:3d}/{:3d} ALREADY EXIST'.format(nexist,nfiles) + '\\n')\n for fname,i,logfile in zip(xmlfiles,procind,logname):\n i = i+1\n if not os.path.exists(logfile):\n\n currentind.append(i)\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : START : \" + '{:3d}/{:3d}'.format(i,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : START : \" + '{:3d}/{:3d}'.format(i,nfiles) + \" : \" + fname + '\\n')\n foldername,foo = os.path.split(logfile)\n if not os.path.exists(foldername):\n 
os.makedirs(foldername)\n iloghandle = open(logfile,'wt')\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.write(getpass.getuser() + \"\\n\")\n iloghandle.flush()\n currentloghandles.append(iloghandle)\n processes.append(subprocess.Popen([photoscanname,\"-r\",scriptname,fname],stdin=iloghandle, stdout=iloghandle, stderr=iloghandle))\n procname.append(fname)\n while len(processes)>=nprocesses:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent))\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname + '\\n')\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]\n currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]\n currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\n \n # Wait for everything to finish\n while len(processes)>0:\n time.sleep(SLEEPTIME)\n if DODEBUG:\n cpu_percent = psutil.cpu_percent()\n ram_percent = psutil.virtual_memory().percent\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} 
RAM: {:5.1f}'.format(cpu_percent,ram_percent))\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + ' CPU: {:5.1f} RAM: {:5.1f}'.format(cpu_percent,ram_percent) + '\\n')\n for p, ind, name, log in zip(processes, currentind, procname, currentloghandles):\n if p.poll() is not None:\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : DONE : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + fname + '\\n')\n iloghandle= log\n iloghandle.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \"\\n\")\n iloghandle.flush()\n iloghandle.close()\n procname[:] = [n for n,p in zip(procname,processes) if p.poll() is None]\n currentind[:] = [ind for ind,p in zip(currentind,processes) if p.poll() is None]\n currentloghandles[:] = [log for log,p in zip(currentloghandles,processes) if p.poll() is None]\n processes[:] = [p for p in processes if p.poll() is None]\nexcept KeyboardInterrupt:\n for p, ind, name, iloghandle in zip(processes, currentind, procname, currentloghandles):\n print(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : KILL : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + name)\n proclog.write(time.strftime(\"%b %d %Y %H:%M:%S\", time.gmtime(time.time())) + \" : KILL : \" + '{:3d}/{:3d}'.format(ind,nfiles) + \" : \" + name + '\\n')\n p.kill()\n iloghandle.flush()\n iloghandle.close()\n time.sleep(0.1)\n os.remove(logname[ind-1])\nproclog.flush()\nproclog.close()\nprint(\"Done\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
-*- coding:utf-8 -*-
@ Time : 14:05
@ Name : handle_ini_file.py
@ Author : xiaoyin_ing
@ Email : [email protected]
@ Software : PyCharm
...
"""
from configparser import ConfigParser
from Common.handle_path import conf_dir
import os
class HandleConfig(ConfigParser):
def __init__(self, ini_file_neme):
super().__init__()
self.ini_file_neme = ini_file_neme
def red_conf__(self):
file_path = os.path.join(conf_dir, self.ini_file_neme)
self.read(file_path, encoding="utf-8")
red_conf = HandleConfig("xiaoyin.ini")
red_conf.red_conf__()
# 日志模块用到的属性
log_data_list = [red_conf.get("log", "log_name"), red_conf.get("log", "log_level"), red_conf.getboolean("log", "file")]
# print(log_data_list)
|
normal
|
{
"blob_id": "01e60123ad87d9ff49812fe3a6f5d55bc85921c5",
"index": 4071,
"step-1": "<mask token>\n\n\nclass HandleConfig(ConfigParser):\n\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding='utf-8')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass HandleConfig(ConfigParser):\n\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding='utf-8')\n\n\n<mask token>\nred_conf.red_conf__()\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass HandleConfig(ConfigParser):\n\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding='utf-8')\n\n\nred_conf = HandleConfig('xiaoyin.ini')\nred_conf.red_conf__()\nlog_data_list = [red_conf.get('log', 'log_name'), red_conf.get('log',\n 'log_level'), red_conf.getboolean('log', 'file')]\n",
"step-4": "<mask token>\nfrom configparser import ConfigParser\nfrom Common.handle_path import conf_dir\nimport os\n\n\nclass HandleConfig(ConfigParser):\n\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding='utf-8')\n\n\nred_conf = HandleConfig('xiaoyin.ini')\nred_conf.red_conf__()\nlog_data_list = [red_conf.get('log', 'log_name'), red_conf.get('log',\n 'log_level'), red_conf.getboolean('log', 'file')]\n",
"step-5": "\"\"\"\n-*- coding:utf-8 -*-\n@ Time : 14:05\n@ Name : handle_ini_file.py\n@ Author : xiaoyin_ing\n@ Email : [email protected]\n@ Software : PyCharm\n ...\n \n\"\"\"\nfrom configparser import ConfigParser\nfrom Common.handle_path import conf_dir\nimport os\n\n\nclass HandleConfig(ConfigParser):\n def __init__(self, ini_file_neme):\n super().__init__()\n self.ini_file_neme = ini_file_neme\n\n def red_conf__(self):\n file_path = os.path.join(conf_dir, self.ini_file_neme)\n self.read(file_path, encoding=\"utf-8\")\n\n\nred_conf = HandleConfig(\"xiaoyin.ini\")\nred_conf.red_conf__()\n\n# 日志模块用到的属性\nlog_data_list = [red_conf.get(\"log\", \"log_name\"), red_conf.get(\"log\", \"log_level\"), red_conf.getboolean(\"log\", \"file\")]\n# print(log_data_list)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
class Rect():
def __init__(self, w, h):
self.w = w
self.h = h
def half(self):
return self.w / 2;
bricks = [Rect(40, 25), Rect(30, 25), Rect(28, 25), Rect(13, 25)]
def setup():
size(500, 500)
noLoop()
def draw():
posx = 0
posy = 0
i = 0
for y in range(20):
posx = 0
for x in range(50):
fill(random(100, 250))
brick = get_brick(i)
rect(posx, posy, brick.w, brick.h)
posx += brick.w
i += 1
posy += brick.h
def get_brick(index):
i = int(random(len(bricks)))
# i = index % len(bricks)
return bricks[i]
|
normal
|
{
"blob_id": "807f0094a9736abdfa3f5b629615a80f1e0d13ef",
"index": 3037,
"step-1": "class Rect:\n\n def __init__(self, w, h):\n self.w = w\n self.h = h\n\n def half(self):\n return self.w / 2\n\n\n<mask token>\n\n\ndef setup():\n size(500, 500)\n noLoop()\n\n\n<mask token>\n",
"step-2": "class Rect:\n\n def __init__(self, w, h):\n self.w = w\n self.h = h\n\n def half(self):\n return self.w / 2\n\n\n<mask token>\n\n\ndef setup():\n size(500, 500)\n noLoop()\n\n\ndef draw():\n posx = 0\n posy = 0\n i = 0\n for y in range(20):\n posx = 0\n for x in range(50):\n fill(random(100, 250))\n brick = get_brick(i)\n rect(posx, posy, brick.w, brick.h)\n posx += brick.w\n i += 1\n posy += brick.h\n\n\n<mask token>\n",
"step-3": "class Rect:\n\n def __init__(self, w, h):\n self.w = w\n self.h = h\n\n def half(self):\n return self.w / 2\n\n\n<mask token>\n\n\ndef setup():\n size(500, 500)\n noLoop()\n\n\ndef draw():\n posx = 0\n posy = 0\n i = 0\n for y in range(20):\n posx = 0\n for x in range(50):\n fill(random(100, 250))\n brick = get_brick(i)\n rect(posx, posy, brick.w, brick.h)\n posx += brick.w\n i += 1\n posy += brick.h\n\n\ndef get_brick(index):\n i = int(random(len(bricks)))\n return bricks[i]\n",
"step-4": "class Rect:\n\n def __init__(self, w, h):\n self.w = w\n self.h = h\n\n def half(self):\n return self.w / 2\n\n\nbricks = [Rect(40, 25), Rect(30, 25), Rect(28, 25), Rect(13, 25)]\n\n\ndef setup():\n size(500, 500)\n noLoop()\n\n\ndef draw():\n posx = 0\n posy = 0\n i = 0\n for y in range(20):\n posx = 0\n for x in range(50):\n fill(random(100, 250))\n brick = get_brick(i)\n rect(posx, posy, brick.w, brick.h)\n posx += brick.w\n i += 1\n posy += brick.h\n\n\ndef get_brick(index):\n i = int(random(len(bricks)))\n return bricks[i]\n",
"step-5": "class Rect():\n def __init__(self, w, h):\n self.w = w\n self.h = h\n \n def half(self):\n return self.w / 2;\n \nbricks = [Rect(40, 25), Rect(30, 25), Rect(28, 25), Rect(13, 25)]\n\ndef setup():\n size(500, 500)\n noLoop()\n \ndef draw():\n \n posx = 0\n posy = 0\n i = 0\n for y in range(20):\n posx = 0\n for x in range(50):\n fill(random(100, 250))\n brick = get_brick(i)\n rect(posx, posy, brick.w, brick.h)\n posx += brick.w\n i += 1\n posy += brick.h\n\ndef get_brick(index):\n i = int(random(len(bricks)))\n# i = index % len(bricks)\n return bricks[i]\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import urllib
def normalize_mac_address(address):
return address.lower().replace("-", ":")
def urlencode(s):
return urllib.quote(s.encode("utf-8"), "")
def urlencode_plus(s):
return urllib.quote_plus(s.encode("utf-8"), "")
|
normal
|
{
"blob_id": "33b8baf2ca819315eaa5f16c7986390acb4d6efd",
"index": 878,
"step-1": "<mask token>\n\n\ndef normalize_mac_address(address):\n return address.lower().replace('-', ':')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef normalize_mac_address(address):\n return address.lower().replace('-', ':')\n\n\ndef urlencode(s):\n return urllib.quote(s.encode('utf-8'), '')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef normalize_mac_address(address):\n return address.lower().replace('-', ':')\n\n\ndef urlencode(s):\n return urllib.quote(s.encode('utf-8'), '')\n\n\ndef urlencode_plus(s):\n return urllib.quote_plus(s.encode('utf-8'), '')\n",
"step-4": "from __future__ import absolute_import, division, unicode_literals\nimport urllib\n\n\ndef normalize_mac_address(address):\n return address.lower().replace('-', ':')\n\n\ndef urlencode(s):\n return urllib.quote(s.encode('utf-8'), '')\n\n\ndef urlencode_plus(s):\n return urllib.quote_plus(s.encode('utf-8'), '')\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport urllib\n\n\ndef normalize_mac_address(address):\n return address.lower().replace(\"-\", \":\")\n\n\ndef urlencode(s):\n return urllib.quote(s.encode(\"utf-8\"), \"\")\n\n\ndef urlencode_plus(s):\n return urllib.quote_plus(s.encode(\"utf-8\"), \"\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
TTTSIZE = 4
def who_win_line(line):
elements = set(line)
if '.' in elements:
return '.'
elements.discard('T')
if len(elements) >= 2:
return 'D'
else:
return elements.pop()
def who_win_tic_tac_toe(original_rows):
#print('%s' % repr(original_rows))
board_full = True
rows = [row[0:TTTSIZE] for row in original_rows]
#print('%s' % repr(rows))
columns = [ [rows[0][0], rows[1][0], rows[2][0], rows[3][0]],
[rows[0][1], rows[1][1], rows[2][1], rows[3][1]],
[rows[0][2], rows[1][2], rows[2][2], rows[3][2]],
[rows[0][3], rows[1][3], rows[2][3], rows[3][3]] ]
diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]
diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]
lines = rows
lines.extend(columns)
lines.append(diagonal1)
lines.append(diagonal2)
for line in lines:
winner = who_win_line(line)
if winner == 'X':
return 'X won'
elif winner == 'O':
return 'O won'
elif winner == '.':
board_full = False
if board_full:
return 'Draw'
else:
return 'Game has not completed'
import sys
#import pdb
if __name__ == '__main__':
filename_prefix = sys.argv[1]
filename_in = filename_prefix + ".in"
filename_out = filename_prefix + ".out"
file_in = open(filename_in, 'r')
lines = file_in.readlines()
testcnt = int(lines[0])
idx = 1
file_out = open(filename_out, 'w')
#pdb.set_trace()
for test in range(testcnt):
res = who_win_tic_tac_toe(lines[idx : idx + TTTSIZE])
file_out.write("Case #{0}: {1}\n".format(test + 1, res))
idx += TTTSIZE + 1
|
normal
|
{
"blob_id": "2e041e33b5c34c2bddc72b36ff641817f1e21db2",
"index": 3735,
"step-1": "<mask token>\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-3": "TTTSIZE = 4\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-4": "TTTSIZE = 4\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\nimport sys\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-5": "TTTSIZE = 4\r\n\r\ndef who_win_line(line):\r\n elements = set(line)\r\n if '.' in elements:\r\n return '.'\r\n elements.discard('T')\r\n if len(elements) >= 2:\r\n return 'D'\r\n else:\r\n return elements.pop()\r\n\r\ndef who_win_tic_tac_toe(original_rows):\r\n #print('%s' % repr(original_rows))\r\n board_full = True\r\n rows = [row[0:TTTSIZE] for row in original_rows]\r\n #print('%s' % repr(rows))\r\n columns = [ [rows[0][0], rows[1][0], rows[2][0], rows[3][0]],\r\n [rows[0][1], rows[1][1], rows[2][1], rows[3][1]],\r\n [rows[0][2], rows[1][2], rows[2][2], rows[3][2]],\r\n [rows[0][3], rows[1][3], rows[2][3], rows[3][3]] ]\r\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\r\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\r\n\r\n lines = rows\r\n lines.extend(columns)\r\n lines.append(diagonal1)\r\n lines.append(diagonal2)\r\n\r\n for line in lines:\r\n winner = who_win_line(line)\r\n if winner == 'X':\r\n return 'X won'\r\n elif winner == 'O':\r\n return 'O won'\r\n elif winner == '.':\r\n board_full = False\r\n if board_full:\r\n return 'Draw'\r\n else:\r\n return 'Game has not completed'\r\n\r\n\r\nimport sys\r\n#import pdb\r\n\r\nif __name__ == '__main__':\r\n filename_prefix = sys.argv[1]\r\n filename_in = filename_prefix + \".in\"\r\n filename_out = filename_prefix + \".out\"\r\n\r\n file_in = open(filename_in, 'r')\r\n lines = file_in.readlines()\r\n\r\n testcnt = int(lines[0])\r\n idx = 1\r\n\r\n file_out = open(filename_out, 'w')\r\n\r\n #pdb.set_trace()\r\n for test in range(testcnt):\r\n res = who_win_tic_tac_toe(lines[idx : idx + TTTSIZE])\r\n file_out.write(\"Case #{0}: {1}\\n\".format(test + 1, res))\r\n idx += TTTSIZE + 1\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.urls import path
from .views import (
TreeCreateView,
TreeListView,
TreeUpdateView,
)
app_name = 'trees'
urlpatterns = [
path('list/', TreeListView.as_view(),
name='list'),
path('create/', TreeCreateView.as_view(),
name='create'),
path('<int:pk>/update/', TreeCreateView.as_view(),
name='update'),
]
|
normal
|
{
"blob_id": "0c1de2c1eb5a4de7aeb14ad6b27aa61e07bc4c51",
"index": 602,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'trees'\nurlpatterns = [path('list/', TreeListView.as_view(), name='list'), path(\n 'create/', TreeCreateView.as_view(), name='create'), path(\n '<int:pk>/update/', TreeCreateView.as_view(), name='update')]\n",
"step-3": "from django.urls import path\nfrom .views import TreeCreateView, TreeListView, TreeUpdateView\napp_name = 'trees'\nurlpatterns = [path('list/', TreeListView.as_view(), name='list'), path(\n 'create/', TreeCreateView.as_view(), name='create'), path(\n '<int:pk>/update/', TreeCreateView.as_view(), name='update')]\n",
"step-4": "from django.urls import path\nfrom .views import (\n TreeCreateView,\n TreeListView,\n TreeUpdateView,\n)\n\n\napp_name = 'trees'\n\nurlpatterns = [\n path('list/', TreeListView.as_view(),\n name='list'),\n path('create/', TreeCreateView.as_view(),\n name='create'),\n path('<int:pk>/update/', TreeCreateView.as_view(),\n name='update'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import requests
import boto3
import uuid
import time
# AWS settings: `profile_name` must match a profile in the local AWS
# credentials/config file; both clients are created in the same region.
profile_name = 'mine'
region = 'us-west-2'
session = boto3.Session(profile_name=profile_name)
# API Gateway client, used below to discover API keys.
api = session.client('apigateway', region_name=region)
# CloudFormation client, used below to read stack exports.
cf = session.client('cloudformation', region_name=region)
def get_key(name_of_key):
    """Return the value of the first API Gateway key whose name contains
    *name_of_key*, or None if no key matches."""
    print('Discovering API Key')
    candidates = api.get_api_keys(includeValues=True)['items']
    for candidate in candidates:
        if name_of_key in candidate['name']:
            return candidate['value']
def get_url(name_of_stack):
    """Return the CloudFormation export named ``url-<name_of_stack>``,
    or None if no such export exists."""
    print('Discovering Cloudformation Exports')
    wanted = 'url-{}'.format(name_of_stack)
    for entry in cf.list_exports()['Exports']:
        if entry['Name'] == wanted:
            return entry['Value']
def post(url, key, data):
    """POST *data* as a JSON body to *url*, authenticating via the
    ``x-api-key`` header; returns the ``requests`` Response object."""
    payload = json.dumps(data)
    request_headers = {'Content-type': 'application/json', 'x-api-key': key}
    return requests.post(url, data=payload, headers=request_headers)
if __name__ == "__main__":
    # Resolve the endpoint URL and API key for the 'advanced' stack, then
    # POST two fresh UUIDs once per second, forever (stop with Ctrl-C).
    name = 'advanced'
    full_url = get_url(name)
    api_key = get_key(name)
    while True:
        body = {
            "input": [
                str(uuid.uuid4()),
                str(uuid.uuid4())
            ]
        }
        print(post(full_url, api_key, body))
        time.sleep(1)
|
normal
|
{
"blob_id": "10fda09f47c292cb3dc901f42d38ead7757460f5",
"index": 3699,
"step-1": "<mask token>\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-3": "<mask token>\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-4": "import json\nimport requests\nimport boto3\nimport uuid\nimport time\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-5": "import json\nimport requests\nimport boto3\nimport uuid\nimport time\n\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == \"__main__\":\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {\n \"input\": [\n str(uuid.uuid4()),\n str(uuid.uuid4())\n ]\n }\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# OpenWeatherMap API Key
# NOTE(review): hard-coding a live API key in source control is a security
# risk — prefer reading it from an environment variable or an untracked
# config file, and rotate this key if the repository is shared.
api_key = "078c8443640961d5ce547c8269db5fd7"
|
normal
|
{
"blob_id": "4eb3d94a5fd22fc29000ec32475de9cbae1c183a",
"index": 5255,
"step-1": "<mask token>\n",
"step-2": "api_key = '078c8443640961d5ce547c8269db5fd7'\n",
"step-3": "# OpenWeatherMap API Key\napi_key = \"078c8443640961d5ce547c8269db5fd7\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import random
# The three playable moves; list order matches the (1)-(3) menu numbering.
OPTIONS = ['rock', 'paper', 'scissors']
def get_human_choice():
    """Show a numbered menu on stdout and return the move the player picked."""
    print('(1) Rock\n(2) Paper\n(3) Scissors')
    selection = int(input('Enter the number of your choice: '))
    return OPTIONS[selection - 1]
def get_computer_choice():
    """Draw the computer's move uniformly at random from OPTIONS."""
    move = random.choice(OPTIONS)
    return move
def print_choices(human_choice, computer_choice):
    """Announce both players' moves (title-cased) on stdout."""
    print(f'You chose {human_choice.title()}')
    print(f'The computer chose {computer_choice.title()}')
def eval_game_result(human_choice, computer_choice):
    """Score one round: return 'draw', 'human', or 'computer'.

    Standard rules: rock beats scissors, paper beats rock,
    scissors beats paper.
    """
    if human_choice == computer_choice:
        return 'draw'
    # For each human move, the computer move it defeats; any move not in
    # the table falls through to 'paper' (the scissors branch).
    loses_to = {'rock': 'scissors', 'paper': 'rock'}
    beaten = loses_to.get(human_choice, 'paper')
    return 'human' if computer_choice == beaten else 'computer'
def compose_output_message(result, human_choice, computer_choice):
    """Build the end-of-round message for *result* ('draw'/'human'/other)."""
    # Guard-clause style: handle the two named outcomes, then fall through
    # to the computer-win message.
    if result == 'draw':
        return 'Draw!'
    if result == 'human':
        return f'Yes, {human_choice} beat {computer_choice}!'
    return f'Sorry, {computer_choice} beat {human_choice}'
def print_result(message):
    """Print the final outcome message."""
    print(message)
# Play a single round: collect both moves, display them, score the round,
# and report the outcome.
human_choice = get_human_choice()
computer_choice = get_computer_choice()
print_choices(human_choice, computer_choice)
game_result = eval_game_result(human_choice, computer_choice)
print_result(compose_output_message(game_result, human_choice, computer_choice)
    )
|
normal
|
{
"blob_id": "2e6bce05c8ba21aa322e306d2cdb8871531d7341",
"index": 5499,
"step-1": "<mask token>\n\n\ndef get_human_choice():\n print('(1) Rock\\n(2) Paper\\n(3) Scissors')\n return OPTIONS[int(input('Enter the number of your choice: ')) - 1]\n\n\ndef get_computer_choice():\n return random.choice(OPTIONS)\n\n\ndef print_choices(human_choice, computer_choice):\n print(f'You chose {human_choice.title()}')\n print(f'The computer chose {computer_choice.title()}')\n\n\ndef eval_game_result(human_choice, computer_choice):\n if human_choice == computer_choice:\n return 'draw'\n elif human_choice == 'rock':\n return 'human' if computer_choice == 'scissors' else 'computer'\n elif human_choice == 'paper':\n return 'human' if computer_choice == 'rock' else 'computer'\n else:\n return 'human' if computer_choice == 'paper' else 'computer'\n\n\ndef compose_output_message(result, human_choice, computer_choice):\n if result == 'draw':\n return 'Draw!'\n elif result == 'human':\n return f'Yes, {human_choice} beat {computer_choice}!'\n else:\n return f'Sorry, {computer_choice} beat {human_choice}'\n\n\ndef print_result(message):\n print(message)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_human_choice():\n print('(1) Rock\\n(2) Paper\\n(3) Scissors')\n return OPTIONS[int(input('Enter the number of your choice: ')) - 1]\n\n\ndef get_computer_choice():\n return random.choice(OPTIONS)\n\n\ndef print_choices(human_choice, computer_choice):\n print(f'You chose {human_choice.title()}')\n print(f'The computer chose {computer_choice.title()}')\n\n\ndef eval_game_result(human_choice, computer_choice):\n if human_choice == computer_choice:\n return 'draw'\n elif human_choice == 'rock':\n return 'human' if computer_choice == 'scissors' else 'computer'\n elif human_choice == 'paper':\n return 'human' if computer_choice == 'rock' else 'computer'\n else:\n return 'human' if computer_choice == 'paper' else 'computer'\n\n\ndef compose_output_message(result, human_choice, computer_choice):\n if result == 'draw':\n return 'Draw!'\n elif result == 'human':\n return f'Yes, {human_choice} beat {computer_choice}!'\n else:\n return f'Sorry, {computer_choice} beat {human_choice}'\n\n\ndef print_result(message):\n print(message)\n\n\n<mask token>\nprint_choices(human_choice, computer_choice)\n<mask token>\nprint_result(compose_output_message(game_result, human_choice, computer_choice)\n )\n",
"step-3": "<mask token>\nOPTIONS = ['rock', 'paper', 'scissors']\n\n\ndef get_human_choice():\n print('(1) Rock\\n(2) Paper\\n(3) Scissors')\n return OPTIONS[int(input('Enter the number of your choice: ')) - 1]\n\n\ndef get_computer_choice():\n return random.choice(OPTIONS)\n\n\ndef print_choices(human_choice, computer_choice):\n print(f'You chose {human_choice.title()}')\n print(f'The computer chose {computer_choice.title()}')\n\n\ndef eval_game_result(human_choice, computer_choice):\n if human_choice == computer_choice:\n return 'draw'\n elif human_choice == 'rock':\n return 'human' if computer_choice == 'scissors' else 'computer'\n elif human_choice == 'paper':\n return 'human' if computer_choice == 'rock' else 'computer'\n else:\n return 'human' if computer_choice == 'paper' else 'computer'\n\n\ndef compose_output_message(result, human_choice, computer_choice):\n if result == 'draw':\n return 'Draw!'\n elif result == 'human':\n return f'Yes, {human_choice} beat {computer_choice}!'\n else:\n return f'Sorry, {computer_choice} beat {human_choice}'\n\n\ndef print_result(message):\n print(message)\n\n\nhuman_choice = get_human_choice()\ncomputer_choice = get_computer_choice()\nprint_choices(human_choice, computer_choice)\ngame_result = eval_game_result(human_choice, computer_choice)\nprint_result(compose_output_message(game_result, human_choice, computer_choice)\n )\n",
"step-4": "import random\nOPTIONS = ['rock', 'paper', 'scissors']\n\n\ndef get_human_choice():\n print('(1) Rock\\n(2) Paper\\n(3) Scissors')\n return OPTIONS[int(input('Enter the number of your choice: ')) - 1]\n\n\ndef get_computer_choice():\n return random.choice(OPTIONS)\n\n\ndef print_choices(human_choice, computer_choice):\n print(f'You chose {human_choice.title()}')\n print(f'The computer chose {computer_choice.title()}')\n\n\ndef eval_game_result(human_choice, computer_choice):\n if human_choice == computer_choice:\n return 'draw'\n elif human_choice == 'rock':\n return 'human' if computer_choice == 'scissors' else 'computer'\n elif human_choice == 'paper':\n return 'human' if computer_choice == 'rock' else 'computer'\n else:\n return 'human' if computer_choice == 'paper' else 'computer'\n\n\ndef compose_output_message(result, human_choice, computer_choice):\n if result == 'draw':\n return 'Draw!'\n elif result == 'human':\n return f'Yes, {human_choice} beat {computer_choice}!'\n else:\n return f'Sorry, {computer_choice} beat {human_choice}'\n\n\ndef print_result(message):\n print(message)\n\n\nhuman_choice = get_human_choice()\ncomputer_choice = get_computer_choice()\nprint_choices(human_choice, computer_choice)\ngame_result = eval_game_result(human_choice, computer_choice)\nprint_result(compose_output_message(game_result, human_choice, computer_choice)\n )\n",
"step-5": null,
"step-ids": [
6,
7,
8,
9
]
}
|
[
6,
7,
8,
9
] |
# Operator precedence
# As in mathematics, Python operators have precedence: multiplication and
# division are evaluated before addition and subtraction.
# Precedence can be looked up in the operator-precedence table; the lower an
# operator appears in that table, the higher its precedence, and
# higher-precedence operators are evaluated first.
# Operators of equal precedence are evaluated left to right.
# There is no need to memorise the table; when precedence is unclear,
# parentheses can be used to make the evaluation order explicit.
a = 1 + 2 * 3
# Which binds tighter: `and` or `or`?
# If `or` bound tighter (or both were equal, evaluated left to right),
# the or-expression would run first and the result would be 3.
# Since `and` actually binds tighter, the and-expression runs first,
# so the result is 1.
a = 1 or 2 and 3
# print(a)
# Logical operators (supplement)
# Comparison operators can be chained
result = 1 < 2 < 3  # equivalent to 1 < 2 and 2 < 3
result = 10 < 20 > 15
print(result)
|
normal
|
{
"blob_id": "25550cbaf6e0e5bdbbe3852bb8cdc05ac300d315",
"index": 8872,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(result)\n",
"step-3": "a = 1 + 2 * 3\na = 1 or 2 and 3\nresult = 1 < 2 < 3\nresult = 10 < 20 > 15\nprint(result)\n",
"step-4": "# 运算符的优先级\n# 和数学中一样,在Python运算也有优先级,比如先乘除 后加减\n# 运算符的优先级可以根据优先级的表格来查询,\n# 在表格中位置越靠下的运算符优先级越高,优先级越高的越优先计算\n# 如果优先级一样则自左向右计算\n# 关于优先级的表格,你知道有这么一个东西就够了,千万不要去记\n# 在开发中如果遇到优先级不清楚的,则可以通过小括号来改变运算顺序\na = 1 + 2 * 3\n\n# 一样 and高 or高\n# 如果or的优先级高,或者两个运算符的优先级一样高\n# 则需要先进行或运算,则运算结果是3\n# 如果and的优先级高,则应该先计算与运算\n# 则运算结果是1\na = 1 or 2 and 3\n\n# print(a)\n\n# 逻辑运算符(补充)\n# 逻辑运算符可以连着使用\nresult = 1 < 2 < 3 # 相当于 1 < 2 and 2 < 3\nresult = 10 < 20 > 15\n\nprint(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import pickle
from matplotlib import pyplot as plt
# All inputs and saved figures live under this sub-project directory.
cwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',
    'CrypOpt_RiskNeutralDensity')
data_path = os.path.join(cwd, 'data') + '/'
# Trading day to visualise.  `res` maps each time-to-maturity (tau) to a dict
# of fitted results; keys inferred from usage below ('df' raw quotes, 'M'
# moneyness grid, 'smile'/'first'/'second' fit and derivatives, 'K' strikes,
# 'S' spot grid, 'q' risk-neutral density) — confirm against the producer
# of the pickle.
day = '2020-03-11'
res = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))
# ---------------------------------------------------------------------- SMILES
# One panel per maturity: observed implied vols (dots) vs the fitted smile.
fig1, axes = plt.subplots(2,4, figsize=(10,7))
for key, ax in zip(sorted(res), axes.flatten()):
    print(key, ax)
    ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')
    ax.plot(res[key]['M'], res[key]['smile'])
    ax.text(0.99, 0.99, r'$\tau$ = ' + str(key),
            horizontalalignment='right',
            verticalalignment='top',
            transform=ax.transAxes)
# Label only the outer axes of the 2x4 grid.
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)
# ------------------------------------------------------------------------ RNDs
# One panel per maturity: risk-neutral density plotted over reversed strikes.
fig2, axes = plt.subplots(2,4, figsize=(10,7))
for key, ax in zip(sorted(res), axes.flatten()):
    print(key, ax)
    ax.plot(res[key]['K'][::-1], res[key]['q'])
    ax.text(0.99, 0.99, r'$\tau$ = ' + str(key),
            horizontalalignment='right',
            verticalalignment='top',
            transform=ax.transAxes)
    ax.set_yticks([])
axes.flatten()[0].set_ylabel('risk neutral density')
axes.flatten()[4].set_ylabel('risk neutral density')
axes.flatten()[4].set_xlabel('spot price')
axes.flatten()[5].set_xlabel('spot price')
axes.flatten()[6].set_xlabel('spot price')
axes.flatten()[7].set_xlabel('spot price')
plt.tight_layout()
fig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)
# ----------------------------------------------------------------- DERIVATIVES
# One panel per maturity: fitted smile with its first and second derivatives.
fig3, axes = plt.subplots(2,4, figsize=(10,7))
for key, ax in zip(sorted(res), axes.flatten()):
    print(key, ax)
    ax.plot(res[key]['M'], res[key]['smile'])
    ax.plot(res[key]['M'], res[key]['first'])
    ax.plot(res[key]['M'], res[key]['second'])
    ax.text(0.99, 0.01, r'$\tau$ = ' + str(key),
            horizontalalignment='right',
            verticalalignment='bottom',
            transform=ax.transAxes)
    ax.set_yticks([])
axes.flatten()[0].set_ylabel('implied volatility')
axes.flatten()[4].set_ylabel('implied volatility')
axes.flatten()[4].set_xlabel('moneyness')
axes.flatten()[5].set_xlabel('moneyness')
axes.flatten()[6].set_xlabel('moneyness')
axes.flatten()[7].set_xlabel('moneyness')
plt.tight_layout()
fig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)), transparent=True)
# ----------------------------------------------------------------- TAU PROCESS
# One summary figure per maturity: observations + smile, the derivatives,
# and the risk-neutral density, saved as `<day>_T<tau>.png`.
for key in res:
    s = res[key]
    fig4, axes = plt.subplots(1,3, figsize=(10,4))
    ax = axes[0]
    ax.plot(s['df'].M, s['df'].iv, '.', c='r')
    ax.plot(s['M'], s['smile'])
    ax.set_xlabel('moneyness')
    ax.set_ylabel('implied volatility')
    ax = axes[1]
    ax.plot(s['M'], s['smile'])
    ax.plot(s['M'], s['first'])
    ax.plot(s['M'], s['second'])
    ax.set_xlabel('moneyness')
    ax.set_ylabel('implied volatility')
    ax = axes[2]
    ax.plot(s['S'], s['q'])
    ax.set_xlabel('spot price')
    ax.set_ylabel(r'risk neutral density')
    ax.set_yticks([])
    plt.tight_layout()
    fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)), transparent=True)
|
normal
|
{
"blob_id": "a01f812584e4cee14c9fe15e9fb6ede4ae3e937a",
"index": 4953,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)\n<mask token>\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['K'][::-1], res[key]['q'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('risk neutral density')\naxes.flatten()[4].set_ylabel('risk neutral density')\naxes.flatten()[4].set_xlabel('spot price')\naxes.flatten()[5].set_xlabel('spot price')\naxes.flatten()[6].set_xlabel('spot price')\naxes.flatten()[7].set_xlabel('spot price')\nplt.tight_layout()\nfig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)\n<mask token>\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.plot(res[key]['M'], res[key]['first'])\n ax.plot(res[key]['M'], res[key]['second'])\n ax.text(0.99, 0.01, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='bottom', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied 
volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),\n transparent=True)\nfor key in res:\n s = res[key]\n fig4, axes = plt.subplots(1, 3, figsize=(10, 4))\n ax = axes[0]\n ax.plot(s['df'].M, s['df'].iv, '.', c='r')\n ax.plot(s['M'], s['smile'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[1]\n ax.plot(s['M'], s['smile'])\n ax.plot(s['M'], s['first'])\n ax.plot(s['M'], s['second'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[2]\n ax.plot(s['S'], s['q'])\n ax.set_xlabel('spot price')\n ax.set_ylabel('risk neutral density')\n ax.set_yticks([])\n plt.tight_layout()\n fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),\n transparent=True)\n",
"step-3": "<mask token>\ncwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',\n 'CrypOpt_RiskNeutralDensity')\ndata_path = os.path.join(cwd, 'data') + '/'\nday = '2020-03-11'\nres = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))\nfig1, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)\nfig2, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['K'][::-1], res[key]['q'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('risk neutral density')\naxes.flatten()[4].set_ylabel('risk neutral density')\naxes.flatten()[4].set_xlabel('spot price')\naxes.flatten()[5].set_xlabel('spot price')\naxes.flatten()[6].set_xlabel('spot price')\naxes.flatten()[7].set_xlabel('spot price')\nplt.tight_layout()\nfig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)\nfig3, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.plot(res[key]['M'], res[key]['first'])\n ax.plot(res[key]['M'], res[key]['second'])\n ax.text(0.99, 0.01, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 
'right', verticalalignment='bottom', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),\n transparent=True)\nfor key in res:\n s = res[key]\n fig4, axes = plt.subplots(1, 3, figsize=(10, 4))\n ax = axes[0]\n ax.plot(s['df'].M, s['df'].iv, '.', c='r')\n ax.plot(s['M'], s['smile'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[1]\n ax.plot(s['M'], s['smile'])\n ax.plot(s['M'], s['first'])\n ax.plot(s['M'], s['second'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[2]\n ax.plot(s['S'], s['q'])\n ax.set_xlabel('spot price')\n ax.set_ylabel('risk neutral density')\n ax.set_yticks([])\n plt.tight_layout()\n fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),\n transparent=True)\n",
"step-4": "import os\nimport pickle\nfrom matplotlib import pyplot as plt\ncwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',\n 'CrypOpt_RiskNeutralDensity')\ndata_path = os.path.join(cwd, 'data') + '/'\nday = '2020-03-11'\nres = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))\nfig1, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)\nfig2, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['K'][::-1], res[key]['q'])\n ax.text(0.99, 0.99, '$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='top', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('risk neutral density')\naxes.flatten()[4].set_ylabel('risk neutral density')\naxes.flatten()[4].set_xlabel('spot price')\naxes.flatten()[5].set_xlabel('spot price')\naxes.flatten()[6].set_xlabel('spot price')\naxes.flatten()[7].set_xlabel('spot price')\nplt.tight_layout()\nfig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)\nfig3, axes = plt.subplots(2, 4, figsize=(10, 7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.plot(res[key]['M'], res[key]['first'])\n ax.plot(res[key]['M'], res[key]['second'])\n ax.text(0.99, 0.01, 
'$\\\\tau$ = ' + str(key), horizontalalignment=\n 'right', verticalalignment='bottom', transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)),\n transparent=True)\nfor key in res:\n s = res[key]\n fig4, axes = plt.subplots(1, 3, figsize=(10, 4))\n ax = axes[0]\n ax.plot(s['df'].M, s['df'].iv, '.', c='r')\n ax.plot(s['M'], s['smile'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[1]\n ax.plot(s['M'], s['smile'])\n ax.plot(s['M'], s['first'])\n ax.plot(s['M'], s['second'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n ax = axes[2]\n ax.plot(s['S'], s['q'])\n ax.set_xlabel('spot price')\n ax.set_ylabel('risk neutral density')\n ax.set_yticks([])\n plt.tight_layout()\n fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)),\n transparent=True)\n",
"step-5": "import os\nimport pickle\nfrom matplotlib import pyplot as plt\n\ncwd = os.path.join(os.getcwd(), 'DEDA_2020SS_Crypto_Options_RND_HD',\n 'CrypOpt_RiskNeutralDensity')\ndata_path = os.path.join(cwd, 'data') + '/'\n\nday = '2020-03-11'\nres = pickle.load(open(data_path + 'results_{}.pkl'.format(day), 'rb'))\n\n\n# ---------------------------------------------------------------------- SMILES\nfig1, axes = plt.subplots(2,4, figsize=(10,7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['df'].M, res[key]['df'].iv, '.')\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.text(0.99, 0.99, r'$\\tau$ = ' + str(key),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig1.savefig(os.path.join(cwd, '{}_smiles.png'.format(day)), transparent=True)\n\n\n# ------------------------------------------------------------------------ RNDs\nfig2, axes = plt.subplots(2,4, figsize=(10,7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['K'][::-1], res[key]['q'])\n ax.text(0.99, 0.99, r'$\\tau$ = ' + str(key),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('risk neutral density')\naxes.flatten()[4].set_ylabel('risk neutral density')\naxes.flatten()[4].set_xlabel('spot price')\naxes.flatten()[5].set_xlabel('spot price')\naxes.flatten()[6].set_xlabel('spot price')\naxes.flatten()[7].set_xlabel('spot price')\nplt.tight_layout()\nfig2.savefig(os.path.join(cwd, '{}_RND.png'.format(day)), transparent=True)\n\n\n# ----------------------------------------------------------------- DERIVATIVES\nfig3, axes = 
plt.subplots(2,4, figsize=(10,7))\nfor key, ax in zip(sorted(res), axes.flatten()):\n print(key, ax)\n ax.plot(res[key]['M'], res[key]['smile'])\n ax.plot(res[key]['M'], res[key]['first'])\n ax.plot(res[key]['M'], res[key]['second'])\n ax.text(0.99, 0.01, r'$\\tau$ = ' + str(key),\n horizontalalignment='right',\n verticalalignment='bottom',\n transform=ax.transAxes)\n ax.set_yticks([])\naxes.flatten()[0].set_ylabel('implied volatility')\naxes.flatten()[4].set_ylabel('implied volatility')\naxes.flatten()[4].set_xlabel('moneyness')\naxes.flatten()[5].set_xlabel('moneyness')\naxes.flatten()[6].set_xlabel('moneyness')\naxes.flatten()[7].set_xlabel('moneyness')\nplt.tight_layout()\nfig3.savefig(os.path.join(cwd, '{}_derivatives.png'.format(day)), transparent=True)\n\n\n# ----------------------------------------------------------------- TAU PROCESS\nfor key in res:\n s = res[key]\n\n fig4, axes = plt.subplots(1,3, figsize=(10,4))\n ax = axes[0]\n ax.plot(s['df'].M, s['df'].iv, '.', c='r')\n ax.plot(s['M'], s['smile'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n\n ax = axes[1]\n ax.plot(s['M'], s['smile'])\n ax.plot(s['M'], s['first'])\n ax.plot(s['M'], s['second'])\n ax.set_xlabel('moneyness')\n ax.set_ylabel('implied volatility')\n\n ax = axes[2]\n ax.plot(s['S'], s['q'])\n ax.set_xlabel('spot price')\n ax.set_ylabel(r'risk neutral density')\n ax.set_yticks([])\n\n plt.tight_layout()\n\n fig4.savefig(os.path.join(cwd, '{}_T{}.png'.format(day, key)), transparent=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
"""Demonstrate MPI broadcast: rank 0 fills a vector and sends it to all ranks."""
from __future__ import division
from __future__ import print_function
import numpy as np
from mpi4py import MPI
from parutils import pprint
comm = MPI.COMM_WORLD
pprint("-"*78)
pprint(" Running on %d cores" % comm.size)
pprint("-"*78)
comm.Barrier()
# Prepare a vector of N=5 elements to be broadcasted...
N = 5
if comm.rank == 0:
    A = np.arange(N, dtype=np.float64)  # rank 0 has the real data
else:
    A = np.zeros(N, dtype=np.float64)  # other ranks start with zeros
print("rank {0}: {1}".format(comm.rank, A))
comm.Barrier()
# Broadcast A from rank 0 to everybody (in-place, buffer interface)
comm.Bcast( [A, MPI.DOUBLE], root=0)
# Everybody should now have the same...
print("[%02d] %s" % (comm.rank, A))
|
normal
|
{
"blob_id": "839b3ebffebce95de25f75edc67a647bd1318268",
"index": 5077,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npprint('-' * 78)\npprint(' Running on %d cores' % comm.size)\npprint('-' * 78)\ncomm.Barrier()\n<mask token>\nif comm.rank == 0:\n A = np.arange(N, dtype=np.float64)\nelse:\n A = np.zeros(N, dtype=np.float64)\nprint('rank {0}: {1}'.format(comm.rank, A))\ncomm.Barrier()\ncomm.Bcast([A, MPI.DOUBLE], root=0)\nprint('[%02d] %s' % (comm.rank, A))\n",
"step-3": "<mask token>\ncomm = MPI.COMM_WORLD\npprint('-' * 78)\npprint(' Running on %d cores' % comm.size)\npprint('-' * 78)\ncomm.Barrier()\nN = 5\nif comm.rank == 0:\n A = np.arange(N, dtype=np.float64)\nelse:\n A = np.zeros(N, dtype=np.float64)\nprint('rank {0}: {1}'.format(comm.rank, A))\ncomm.Barrier()\ncomm.Bcast([A, MPI.DOUBLE], root=0)\nprint('[%02d] %s' % (comm.rank, A))\n",
"step-4": "from __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nfrom mpi4py import MPI\nfrom parutils import pprint\ncomm = MPI.COMM_WORLD\npprint('-' * 78)\npprint(' Running on %d cores' % comm.size)\npprint('-' * 78)\ncomm.Barrier()\nN = 5\nif comm.rank == 0:\n A = np.arange(N, dtype=np.float64)\nelse:\n A = np.zeros(N, dtype=np.float64)\nprint('rank {0}: {1}'.format(comm.rank, A))\ncomm.Barrier()\ncomm.Bcast([A, MPI.DOUBLE], root=0)\nprint('[%02d] %s' % (comm.rank, A))\n",
"step-5": "#!/usr/bin/env python\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom mpi4py import MPI\n\nfrom parutils import pprint\n\ncomm = MPI.COMM_WORLD\n\npprint(\"-\"*78)\npprint(\" Running on %d cores\" % comm.size)\npprint(\"-\"*78)\n\ncomm.Barrier()\n\n# Prepare a vector of N=5 elements to be broadcasted...\nN = 5\nif comm.rank == 0:\n A = np.arange(N, dtype=np.float64) # rank 0 has proper data\nelse:\n A = np.zeros(N, dtype=np.float64) # rank 0 has proper data\nprint(\"rank {0}: {1}\".format(comm.rank, A))\ncomm.Barrier()\n# Broadcast A from rank 0 to everybody\ncomm.Bcast( [A, MPI.DOUBLE], root=0)\n\n# Everybody should now have the same...\nprint(\"[%02d] %s\" % (comm.rank, A))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# ch14_26.py
# Deliberately buggy textbook example: file.write() requires a str, so
# passing the int x raises "TypeError: write() argument must be str, not int".
fn = 'out14_26.txt'
x = 100
with open(fn, 'w') as file_Obj:
    file_Obj.write(x)  # writing the numeric value x directly raises an error
|
normal
|
{
"blob_id": "e4f07355300003943d2fc09f80746a1201de7e37",
"index": 1678,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(fn, 'w') as file_Obj:\n file_Obj.write(x)\n",
"step-3": "fn = 'out14_26.txt'\nx = 100\nwith open(fn, 'w') as file_Obj:\n file_Obj.write(x)\n",
"step-4": "# ch14_26.py\r\nfn = 'out14_26.txt'\r\nx = 100\r\n\r\nwith open(fn, 'w') as file_Obj:\r\n file_Obj.write(x) # 直接輸出數值x產生錯誤\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""Largest product in a series
Problem 8
The four adjacent digits in the 1000-digit number that have the greatest product
are 9 x 9 x 8 x 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number that have the
greatest product. What is the value of this product?"""
import numpy
series = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725"""
series = [int(s) for s in series if s not in [' ', '\n']]
old_max = 0
for offset in range(13, len(series) + 1):
numbers = series[offset-13:offset]
result = numpy.prod(numbers)
if result > old_max:
old_max = result
print old_max
|
normal
|
{
"blob_id": "601d32bf30aa454bbc7d31d6ce4b7296cef0fdfe",
"index": 9374,
"step-1": "\"\"\"Largest product in a series\nProblem 8\nThe four adjacent digits in the 1000-digit number that have the greatest product\nare 9 x 9 x 8 x 9 = 5832.\n\n73167176531330624919225119674426574742355349194934\n96983520312774506326239578318016984801869478851843\n85861560789112949495459501737958331952853208805511\n12540698747158523863050715693290963295227443043557\n66896648950445244523161731856403098711121722383113\n62229893423380308135336276614282806444486645238749\n30358907296290491560440772390713810515859307960866\n70172427121883998797908792274921901699720888093776\n65727333001053367881220235421809751254540594752243\n52584907711670556013604839586446706324415722155397\n53697817977846174064955149290862569321978468622482\n83972241375657056057490261407972968652414535100474\n82166370484403199890008895243450658541227588666881\n16427171479924442928230863465674813919123162824586\n17866458359124566529476545682848912883142607690042\n24219022671055626321111109370544217506941658960408\n07198403850962455444362981230987879927244284909188\n84580156166097919133875499200524063689912560717606\n05886116467109405077541002256983155200055935729725\n71636269561882670428252483600823257530420752963450\nFind the thirteen adjacent digits in the 1000-digit number that have the\ngreatest product. 
What is the value of this product?\"\"\"\n\nimport numpy\n\nseries = \"\"\"73167176531330624919225119674426574742355349194934\n 96983520312774506326239578318016984801869478851843\n 85861560789112949495459501737958331952853208805511\n 12540698747158523863050715693290963295227443043557\n 66896648950445244523161731856403098711121722383113\n 62229893423380308135336276614282806444486645238749\n 30358907296290491560440772390713810515859307960866\n 70172427121883998797908792274921901699720888093776\n 65727333001053367881220235421809751254540594752243\n 52584907711670556013604839586446706324415722155397\n 53697817977846174064955149290862569321978468622482\n 83972241375657056057490261407972968652414535100474\n 82166370484403199890008895243450658541227588666881\n 16427171479924442928230863465674813919123162824586\n 17866458359124566529476545682848912883142607690042\n 24219022671055626321111109370544217506941658960408\n 07198403850962455444362981230987879927244284909188\n 84580156166097919133875499200524063689912560717606\n 05886116467109405077541002256983155200055935729725\"\"\"\n\nseries = [int(s) for s in series if s not in [' ', '\\n']]\nold_max = 0\nfor offset in range(13, len(series) + 1):\n numbers = series[offset-13:offset]\n result = numpy.prod(numbers)\n if result > old_max:\n old_max = result\nprint old_max",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/paperDoll/SkinRaytracing.py
import trinity
import blue
import telemetry
import ctypes
import math
import time
import geo2
import struct
import itertools
import weakref
import uthread
import paperDoll as PD
import log
import random
mylog = log.Channel('optix', 'python')
def LogInfo(text, *args):
    """Log *text* followed by the stringified *args*, space-separated,
    to the 'optix' channel at INFO level."""
    message = ' '.join([text] + [str(arg) for arg in args])
    mylog.Log(message, log.LGINFO)
def LogWarn(text, *args):
    """Log *text* followed by the stringified *args*, space-separated,
    to the 'optix' channel at WARN level."""
    message = ' '.join([text] + [str(arg) for arg in args])
    mylog.Log(message, log.LGWARN)
class SkinRaytracingTools():
    """Static helpers shared by the OptiX skin raytracer: uploading trinity
    camera matrices, wrapping trinity textures/render targets as OptiX
    samplers, and building small user-data buffers (lights, counters).

    NOTE(review): this file is Python 2 (print statements, iteritems/xrange
    elsewhere) and depends on the EVE client 'trinity'/'geo2' engine modules.
    """
    __guid__ = 'paperDoll.SkinRaytracingTools'

    @staticmethod
    def SetOptixMatrixFromTrinity(optix, matrixName, ratio = None):
        """Compute the clip-to-world matrix from the current trinity camera
        and upload it under *matrixName*; also uploads the view transform as
        'viewTransform'. *ratio* overrides the aspect ratio when given.
        Returns the view matrix that was uploaded."""
        proj = trinity.TriProjection()
        view = trinity.TriView()
        view.transform = trinity.GetViewTransform()
        proj.PerspectiveFov(trinity.GetFieldOfView(), trinity.GetAspectRatio() if ratio is None else ratio, trinity.GetFrontClip(), trinity.GetBackClip())
        # clip->world = inverse(projection) * inverse(view)
        projToView = geo2.MatrixInverse(proj.transform)
        viewToWorld = geo2.MatrixInverse(view.transform)
        projToWorld = geo2.MatrixMultiply(projToView, viewToWorld)
        r0 = projToWorld[0]
        r1 = projToWorld[1]
        r2 = projToWorld[2]
        r3 = projToWorld[3]
        mat = trinity.TriMatrix(r0[0], r0[1], r0[2], r0[3], r1[0], r1[1], r1[2], r1[3], r2[0], r2[1], r2[2], r2[3], r3[0], r3[1], r3[2], r3[3])
        optix.SetMatrix4x4(matrixName, mat)
        r0 = view.transform[0]
        r1 = view.transform[1]
        r2 = view.transform[2]
        r3 = view.transform[3]
        mat = trinity.TriMatrix(r0[0], r0[1], r0[2], r0[3], r1[0], r1[1], r1[2], r1[3], r2[0], r2[1], r2[2], r2[3], r3[0], r3[1], r3[2], r3[3])
        optix.SetMatrix4x4('viewTransform', mat)
        return mat

    @staticmethod
    def CreateSamplerForTexture(name, map, waitForFinish):
        """Blit *map* into a B8G8R8A8 render target and wrap the result in an
        OptiX texture sampler with normalized indexing. Returns a
        (sampler, resource) pair; the second element must be kept alive with
        the sampler."""
        rt = trinity.Tr2RenderTarget(map.width, map.height, 1, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)
        job = trinity.CreateRenderJob()
        job.PushRenderTarget(rt)
        job.PushDepthStencil(None)
        job.SetStdRndStates(trinity.RM_FULLSCREEN)
        job.RenderTexture(map)
        job.PopDepthStencil()
        job.PopRenderTarget()
        job.ScheduleOnce()
        if waitForFinish:
            job.WaitForFinish()
        sampler = trinity.Tr2OptixTextureSampler()
        # NOTE(review): 'if True' selects the copy-to-texture path; the
        # render-target interop branch is intentionally disabled.
        if True:
            res = trinity.TriTextureRes()
            res.CreateAndCopyFromRenderTarget(rt)
            sampler.CreateFromTexture(res)
        else:
            sampler.CreateFromRenderTarget(rt)
        sampler.SetNormalizedIndexingMode(True)
        if True:
            return (sampler, res)
        else:
            return (sampler, rt)

    @staticmethod
    def ConvertCubeToTextures(cube):
        """Render the six faces of cube map *cube* into individual 2D
        textures by drawing the background nebula effect once per face
        direction. Returns (textures, face-name-suffixes)."""
        names = ['PX',
         'NX',
         'PY',
         'NY',
         'PZ',
         'NZ']
        # Per-face look direction and up vector for the 90-degree FOV camera.
        viewVec = [(1, 0, 0),
         (-1, 0, 0),
         (0, 1, 0),
         (0, -1, 0),
         (0, 0, 1),
         (0, 0, -1)]
        upVec = [(0, 1, 0),
         (0, 1, 0),
         (0, 0, 1),
         (0, 0, -1),
         (0, 1, 0),
         (0, 1, 0)]
        spaceScene = trinity.EveSpaceScene()
        spaceScene.envMap1ResPath = str(cube.resourcePath)
        spaceScene.envMapScaling = (1, 1, -1)
        spaceScene.backgroundRenderingEnabled = True
        spaceScene.backgroundEffect = trinity.Load('res:/dx9/scene/starfield/bakeNebula.red')
        blue.resMan.Wait()
        node = PD.FindParameterByName(spaceScene.backgroundEffect, 'NebulaBrightness')
        if node is None:
            node = trinity.Tr2FloatParameter()
            node.name = 'NebulaBrightness'
            spaceScene.backgroundEffect.parameters.append(node)
        if node is not None:
            node.value = 100
        node = PD.FindResourceByName(spaceScene.backgroundEffect, 'NebulaMap')
        if node is None:
            node = trinity.TriTexture2DParam()
            node.name = 'NebulaMap'
            spaceScene.backgroundEffect.resources.append(node)
        node.SetResource(cube.resource)
        blue.resMan.Wait()
        mipmapped = []
        useTexture = True
        for i in xrange(len(names)):
            name = names[i]
            rt = PD.SkinLightmapRenderer.CreateRenderTarget(cube.resource.width, cube.resource.height, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM, useRT=True)
            job = trinity.CreateRenderJob(name=name)
            job.PushRenderTarget(rt)
            job.PushDepthStencil(None)
            # Distinct debug clear color per face (bright/dark R, G, B).
            job.Clear([(1, 0, 0),
             (0.2, 0, 0),
             (0, 1, 0),
             (0, 0.2, 0),
             (0, 0, 1),
             (0, 0, 0.2)][i], None)
            proj = trinity.TriProjection()
            proj.PerspectiveFov(math.pi * 0.5, 1, 0.1, 1000)
            view = trinity.TriView()
            view.SetLookAtPosition((0, 0, 0), viewVec[i], upVec[i])
            viewport = trinity.TriViewport(0, 0, cube.resource.width, cube.resource.height, 0.0, 1.0)
            job.SetView(view)
            job.SetProjection(proj)
            job.SetViewport(viewport)
            job.Update(spaceScene)
            job.RenderScene(spaceScene)
            job.PopDepthStencil()
            job.PopRenderTarget()
            if useTexture:
                tex = trinity.TriTextureRes(cube.resource.width, cube.resource.height, 1, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)
            if True:
                job.ScheduleOnce()
                job.WaitForFinish()
                if useTexture:
                    mipmapped.append(tex)
                else:
                    mipmapped.append(rt)
            else:
                job.ScheduleRecurring()
        return (mipmapped, names)

    @staticmethod
    def FindAllTextureResourcesFromEffect(effect, scope):
        """Collect 2D texture resources from *effect* by name; cube
        parameters are expanded to six per-face samplers registered directly
        on *scope*. Returns (name->resource dict, keepalive list)."""
        textures = {}
        samplers = []
        cubemaps = []
        if effect is not None:
            for r in effect.resources:
                if type(r) == trinity.TriTexture2DParameter and r.resource is not None:
                    textures[r.name] = r.resource
                elif type(r) == trinity.TriTextureCubeParameter and r.resource is not None:
                    if r.name in cubemaps:
                        continue
                    LogInfo('', r.name, ': Converting to individual textures')
                    cubemaps.append(r.name)
                    mipmaps, names = SkinRaytracingTools.ConvertCubeToTextures(r)
                    for i in range(len(names)):
                        if i < len(mipmaps):
                            sampler = trinity.Tr2OptixTextureSampler()
                            sampler.CreateFromTexture(mipmaps[i])
                            sampler.SetNormalizedIndexingMode(True)
                            scope.SetSampler(r.name + names[i], sampler)
                            LogInfo('No-Copy Cube Side Interop for ' + r.name + names[i])
                            samplers.append(mipmaps[i])
                            samplers.append(sampler)
        return (textures, samplers)

    @staticmethod
    def FindAllTextureResources(dynamic, scope):
        """Walk every mesh area effect of *dynamic* (skinned object, ship or
        station) and aggregate its texture resources via
        FindAllTextureResourcesFromEffect."""
        textures = {}
        samplers = []
        cubemaps = []

        def ProcessMesh(mesh):
            for area in itertools.chain(mesh.opaqueAreas, mesh.decalAreas, mesh.transparentAreas):
                newTextures, newSamplers = SkinRaytracingTools.FindAllTextureResourcesFromEffect(area.effect, scope)
                textures.update(newTextures)
                samplers.extend(newSamplers)

        if type(dynamic) == trinity.Tr2IntSkinnedObject:
            for mesh in dynamic.visualModel.meshes:
                ProcessMesh(mesh)
        elif type(dynamic) == trinity.EveShip2:
            ProcessMesh(dynamic.highDetailMesh.object)
        elif type(dynamic) == trinity.EveStation2:
            ProcessMesh(dynamic.highDetailMesh.object)
        return (textures, samplers)

    @staticmethod
    def InteropTexture(name, texture, waitForFinish, scope):
        """Expose *texture* to OptiX under *name* on *scope*. BGRA8 textures
        interop without a copy; cube textures are skipped (returns None
        implicitly); anything else goes through a copy via
        CreateSamplerForTexture. Returns the keepalive pair or None."""
        if texture.format == trinity.PIXEL_FORMAT.B8G8R8A8_UNORM:
            sampler = trinity.Tr2OptixTextureSampler()
            sampler.CreateFromTexture(texture)
            sampler.SetNormalizedIndexingMode(True)
            scope.SetSampler(name, sampler)
            LogInfo('No-Copy Interop for', name)
            return (sampler, None)
        if texture.type == trinity.TRIRTYPE_CUBETEXTURE:
            LogInfo('Copy-Interop for cubes not supported, skipping', name)
            return
        sampler_rt = SkinRaytracingTools.CreateSamplerForTexture(name, texture, waitForFinish)
        if sampler_rt is None or len(sampler_rt) < 1:
            LogInfo('InteropTexture failed for', name)
        else:
            scope.SetSampler(name, sampler_rt[0])
            LogInfo('Interop for', name)
        return sampler_rt

    @staticmethod
    def InteropAllTexturesFromEffect(optix, effect, waitForFinish, nameTranslation = None, scope = None, cache = None):
        """Interop every texture of *effect* into OptiX, optionally renaming
        via *nameTranslation* and reusing samplers from *cache* (keyed by
        texture). Spotlight textures are skipped. Returns the keepalive
        list."""
        if scope is None:
            scope = optix
        textures, samplers = SkinRaytracingTools.FindAllTextureResourcesFromEffect(effect, scope)
        for name, texture in textures.iteritems():
            if 'spotlight' in name.lower():
                continue
            if nameTranslation is not None:
                name = nameTranslation.get(name, name)
            if cache is not None and texture in cache:
                sampler = cache[texture]
                scope.SetSampler(name, sampler[0])
                LogInfo('Interop cache for', name)
            else:
                sampler = SkinRaytracingTools.InteropTexture(name, texture, waitForFinish, scope)
                if sampler and cache is not None:
                    cache[texture] = sampler
            if sampler is not None:
                samplers.append(sampler)
        return samplers

    @staticmethod
    def InteropAllTextures(optix, dynamic, waitForFinish, nameTranslation = None, scope = None):
        """Interop every texture found on object *dynamic* into OptiX; same
        renaming/skipping rules as InteropAllTexturesFromEffect but without
        a cache."""
        if scope is None:
            scope = optix
        textures, samplers = SkinRaytracingTools.FindAllTextureResources(dynamic, scope)
        for name, texture in textures.iteritems():
            if 'spotlight' in name.lower():
                continue
            if nameTranslation is not None:
                name = nameTranslation.get(name, name)
            sampler = SkinRaytracingTools.InteropTexture(name, texture, waitForFinish, scope)
            if sampler is not None:
                samplers.append(sampler)
        return samplers

    @staticmethod
    def SafeLinearize(values):
        """Apply gamma-2.2 linearization to an RGBA tuple while preserving
        values above 1 (HDR): normalizes by the peak channel before the
        power curve. Alpha passes through unchanged."""
        peak = max(1, max(values[0], max(values[1], values[2])))
        return (peak * math.pow(values[0] / peak, 2.2),
         peak * math.pow(values[1] / peak, 2.2),
         peak * math.pow(values[2] / peak, 2.2),
         values[3])

    @staticmethod
    def CopyParametersToContext(effect, instance, linearNames = None):
        """Copy float4 and scalar float parameters from *effect* onto OptiX
        *instance*; parameters named in *linearNames* are gamma-linearized
        first. Scalars are padded into a float4 (value, 0, 0, 0)."""
        for p in effect.parameters:
            if type(p) is trinity.Tr2Vector4Parameter:
                value = SkinRaytracingTools.SafeLinearize(p.value) if linearNames is not None and p.name in linearNames else p.value
                instance.SetFloat4(p.name, value[0], value[1], value[2], value[3])
            elif type(p) is trinity.TriFloatParameter or type(p) is trinity.Tr2FloatParameter:
                instance.SetFloat4(p.name, p.value, 0, 0, 0)

    @staticmethod
    def CreateBufferForLights(lights, leaveEmpty = False, preserveAlpha = False):
        """Pack *lights* into a 64-byte-stride OptiX user-data buffer:
        position+radius, linearized color+falloff (or raw alpha when
        *preserveAlpha*), cone direction and cos(inner/outer) angles.
        *leaveEmpty* keeps the buffer sized but writes no lights."""
        bufEveLights = trinity.Tr2OptixBuffer()
        bufEveLights.CreateUserData(64, len(lights), trinity.OPTIX_BUFFER_OUTPUT, False)
        bufEveLights.MapUser()
        buffer = ''
        if leaveEmpty:
            lights = []
        for light in lights:
            innerAngle = light.coneAlphaInner
            outerAngle = light.coneAlphaOuter
            # Keep the inner cone strictly inside the outer cone (degrees).
            if innerAngle + 1.0 > outerAngle:
                innerAngle = outerAngle - 1.0
            innerAngle = math.cos(innerAngle * 3.1415927 / 180.0)
            outerAngle = math.cos(outerAngle * 3.1415927 / 180.0)
            coneDir = geo2.Vec3Normalize((light.coneDirection[0], light.coneDirection[1], light.coneDirection[2]))
            import struct
            # NOTE(review): redundant import; struct is already imported at
            # module scope.
            buffer += struct.pack('16f', light.position[0], light.position[1], light.position[2], light.radius, math.pow(light.color[0], 2.2), math.pow(light.color[1], 2.2), math.pow(light.color[2], 2.2), light.falloff if not preserveAlpha else light.color[3], coneDir[0], coneDir[1], coneDir[2], outerAngle, innerAngle, 0, 0, 0)
        bufEveLights.SetUserDataFromStruct(buffer)
        bufEveLights.UnmapUser()
        return bufEveLights

    @staticmethod
    def CreateUInt1Buffer(optix, name):
        """Create a 1x1 uint input/output buffer initialized to zero and
        bind it to *optix* under *name*. Returns the buffer."""
        buffer = trinity.Tr2OptixBuffer()
        buffer.CreateUInt1(1, 1, trinity.OPTIX_BUFFER_INPUT_OUTPUT)
        buffer.Map()
        buffer.SetUserDataI(0, 0)
        buffer.Unmap()
        optix.SetBuffer(name, buffer)
        return buffer

    @staticmethod
    def matEqual(m1, m2):
        """Element-wise equality of two TriMatrix values (all 16 cells)."""
        return m1._11 == m2._11 and m1._12 == m2._12 and m1._13 == m2._13 and m1._14 == m2._14 and m1._21 == m2._21 and m1._22 == m2._22 and m1._23 == m2._23 and m1._24 == m2._24 and m1._31 == m2._31 and m1._32 == m2._32 and m1._33 == m2._33 and m1._34 == m2._34 and m1._41 == m2._41 and m1._42 == m2._42 and m1._43 == m2._43 and m1._44 == m2._44

    @staticmethod
    def FuncWrapper(weakSelf, func):
        """Invoke *func* with the dereferenced weakref *weakSelf* only if
        the referent is still alive (used by render-job callbacks)."""
        if weakSelf():
            func(weakSelf())
class OitHelper():
    """Owns the order-independent-transparency buffers: a one-slot
    allocation counter ('oit_allocator') and a fixed-size fragment pool
    ('oit_pool') of 1M entries, 176 bytes each."""

    def __init__(self, optix):
        # Counter the device-side code bumps for each OIT allocation.
        self.oitAllocatorBuffer = SkinRaytracingTools.CreateUInt1Buffer(optix, 'oit_allocator')
        oitPoolBuffer = trinity.Tr2OptixBuffer()
        oitPoolBuffer.CreateUserData(64 + 112, 1048576, trinity.OPTIX_BUFFER_INPUT_OUTPUT, True)
        optix.SetBuffer('oit_pool', oitPoolBuffer)
        self.oitPoolBuffer = oitPoolBuffer

    def ResetAllocationCount(self):
        """Zero the allocation counter before launching a frame."""
        self.oitAllocatorBuffer.Map()
        self.oitAllocatorBuffer.SetUserDataI(0, 0)
        self.oitAllocatorBuffer.Unmap()

    def GetAllocationCount(self):
        """Read back the number of OIT allocations made since the reset."""
        self.oitAllocatorBuffer.Map()
        count = self.oitAllocatorBuffer.GetUserDataI(0)
        self.oitAllocatorBuffer.Unmap()
        return count
class RayCountHelper():
    """Owns the 'ray_count' counter buffer used for per-frame ray
    statistics."""

    def __init__(self, optix):
        self.rayCountBuffer = SkinRaytracingTools.CreateUInt1Buffer(optix, 'ray_count')

    def ResetCount(self):
        """Zero the ray counter before launching a frame."""
        self.rayCountBuffer.Map()
        self.rayCountBuffer.SetUserDataI(0, 0)
        self.rayCountBuffer.Unmap()

    def GetCount(self):
        """Read back the number of rays traced since the last reset."""
        self.rayCountBuffer.Map()
        count = self.rayCountBuffer.GetUserDataI(0)
        self.rayCountBuffer.Unmap()
        return count
class CaptureHelper():
    """Screenshot helper: a BGRA8 render target plus the render steps to
    blit into it and a method to save it to disk."""

    def __init__(self, width, height):
        self.capture = trinity.Tr2RenderTarget(width, height, 1, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)

    def SaveSurfaceToFile(self, filename):
        """Write the captured render target to *filename*."""
        trinity.SaveRenderTarget(filename, self.capture)
        LogInfo('Saved to', filename)

    def CreateRenderSteps(self, rj, blitfx):
        """Append steps to render job *rj* that render *blitfx* into the
        capture target (depth disabled)."""
        rj.PushRenderTarget(self.capture).name = 'Begin screenshot capture'
        rj.PushDepthStencil(None).name = ' push depth'
        rj.RenderEffect(blitfx).name = ' Blit to screenshot'
        rj.PopDepthStencil().name = ' pop depth'
        rj.PopRenderTarget().name = 'End screenshot capture'
class FullScreenBlitter():
    """Loads and wires the post-processing effects for presenting the OptiX
    accumulation buffer: gamma blit, highpass filter, and a separable
    horizontal/vertical gaussian blur for bloom."""

    def __init__(self, width, height):
        self.effect = trinity.Tr2Effect()
        self.effect.effectFilePath = 'res:/graphics/effect/optix/shaders/gammaBlit.fx'
        if self.effect.effectResource is None:
            LogWarn('Failed to load effect 1')
            return
        self.highpassEffect = trinity.Tr2Effect()
        self.highpassEffect.effectFilePath = 'res:/graphics/effect/optix/shaders/highpassFilter.fx'
        if self.highpassEffect.effectResource is None:
            LogWarn('Failed to load effect 1')
            return
        self.gaussianHorizEffect = trinity.Tr2Effect()
        self.gaussianHorizEffect.effectFilePath = 'res:/graphics/effect/optix/shaders/gaussianBlur.fx'
        if self.gaussianHorizEffect.effectResource is None:
            LogWarn('Failed to load effect 3')
            return
        self.gaussianVertEffect = trinity.Tr2Effect()
        self.gaussianVertEffect.effectFilePath = 'res:/graphics/effect/optix/shaders/gaussianBlur.fx'
        if self.gaussianVertEffect.effectResource is None:
            LogWarn('Failed to load effect 3')
            return
        # Block until all four effect resources have finished loading.
        for effect in [self.effect,
         self.highpassEffect,
         self.gaussianHorizEffect,
         self.gaussianVertEffect]:
            while effect.effectResource.isLoading:
                PD.Yield()
        # Shared 'Color' parameter: scales the accumulated image by
        # 1/framecount (see UpdateFrameCount).
        self.blitcolor = trinity.Tr2Vector4Parameter()
        self.blitcolor.name = 'Color'
        for effect in [self.effect,
         self.highpassEffect,
         self.gaussianHorizEffect,
         self.gaussianVertEffect]:
            effect.PopulateParameters()
            effect.RebuildCachedData()
            effect.parameters.append(self.blitcolor)
        sizesParam = trinity.Tr2Vector4Parameter()
        sizesParam.name = 'InvSize'
        sizesParam.value = (1.0 / width,
         1.0 / height,
         0,
         0)
        for effect in [self.effect, self.highpassEffect]:
            effect.parameters.append(sizesParam)
        # Separable blur: horizontal pass steps in x, vertical pass in y.
        sizesHorizParam = trinity.Tr2Vector4Parameter()
        sizesHorizParam.name = 'invTexelSize'
        sizesHorizParam.value = (1.0 / width,
         0.0,
         0,
         0)
        self.gaussianHorizEffect.parameters.append(sizesHorizParam)
        sizesVertParam = trinity.Tr2Vector4Parameter()
        sizesVertParam.name = 'invTexelSize'
        sizesVertParam.value = (0.0,
         1.0 / height,
         0,
         0)
        self.gaussianVertEffect.parameters.append(sizesVertParam)

    def SetTexture(self, optixOutputTexture, highpassTexture, filteredTexture):
        """Wire the pipeline textures: OptiX output feeds the gamma blit and
        highpass; highpass feeds the horizontal blur and the final bloom
        combine; the filtered texture feeds the vertical blur."""
        tex = trinity.TriTexture2DParameter()
        tex.name = 'Texture'
        tex.SetResource(optixOutputTexture)
        for effect in [self.effect, self.highpassEffect]:
            effect.resources.append(tex)
        tex = trinity.TriTexture2DParameter()
        tex.name = 'Texture'
        tex.SetResource(highpassTexture)
        self.gaussianHorizEffect.resources.append(tex)
        tex = trinity.TriTexture2DParameter()
        tex.name = 'Texture'
        tex.SetResource(filteredTexture)
        self.gaussianVertEffect.resources.append(tex)
        tex = trinity.TriTexture2DParameter()
        tex.name = 'BloomTexture'
        tex.SetResource(highpassTexture)
        self.effect.resources.append(tex)

    def UpdateFrameCount(self, framecount):
        """Set the blit color to 1/framecount so the progressive
        accumulation buffer is averaged on display."""
        invFC = 1.0 / framecount if framecount > 0 else 1.0
        self.blitcolor.value = (invFC,
         invFC,
         invFC,
         invFC)
class FullOptixRenderer():
__guid__ = 'paperDoll.FullOptixRenderer'
instance = None
    def AddCallback(self, func, name, rj):
        """Append a python-callback step named *name* to render job *rj*.

        *func* is invoked through a weakref to self so the render job does
        not keep this renderer alive; the call is skipped once self dies.
        """
        cb = trinity.TriStepPythonCB()
        weakSelf = weakref.ref(self)
        cb.SetCallback(lambda : SkinRaytracingTools.FuncWrapper(weakSelf, func))
        cb.name = name
        rj.steps.append(cb)
    def GetFrameCount(self):
        """Return the number of progressive-refinement frames accumulated."""
        return self.framecount
    def SaveScreenshot(self, filename):
        """Save the capture render target to *filename*."""
        self.capture.SaveSurfaceToFile(filename)
    def AddRenderPreviewStep(self, renderJob):
        """Append steps to *renderJob* that blit the OptiX output to the
        current target as a fullscreen quad (depth disabled)."""
        renderJob.SetStdRndStates(trinity.RM_FULLSCREEN).name = ' [optix] fullscreen quad'
        renderJob.PushDepthStencil(None).name = ' [optix] push depth'
        renderJob.RenderEffect(self.blitfx.effect).name = ' [optix] Blit to screenshot'
        renderJob.PopDepthStencil().name = ' [optix] pop depth'
    def RefreshMatrices(self):
        """Re-upload the skinned model's matrices to OptiX, re-run skinning/
        tesselation, and re-apply settings (e.g. after a pose change)."""
        model = self.skinnedObject
        self.optix.RefreshMatrices(model, self.skinnedOptix)
        self.RunSkinningAndTesselation()
        self.ApplySettings()
        print 'Refreshed'
    @staticmethod
    def RaytraceFrame(selfRef):
        """Per-frame raytrace callback (static; *selfRef* is the renderer).

        Re-uploads the camera; if the view changed, clears the accumulation
        buffer, restarts the frame counter and recomputes depth of field
        (auto-focused on the nearer eyeball unless overridden by the
        'lens_focal_distance' setting). Then launches entry point 0 and,
        every 32 frames, copies the accumulation buffer to the display
        texture and prints timing/ray statistics.
        """
        start = time.time()
        VP = SkinRaytracingTools.SetOptixMatrixFromTrinity(selfRef.optix, 'clipToWorld', selfRef.width / float(selfRef.height))
        if not SkinRaytracingTools.matEqual(VP, selfRef.previousVP):
            selfRef.previousVP = VP
            selfRef.outputBuffer.Clear()
            selfRef.framecount = 0
            model = selfRef.skinnedObject
            pos1 = model.GetBonePosition(model.GetBoneIndex('fj_eyeballLeft'))
            pos2 = model.GetBonePosition(model.GetBoneIndex('fj_eyeballRight'))
            dist1 = geo2.Vec3Distance(pos1, trinity.GetViewPosition())
            dist2 = geo2.Vec3Distance(pos2, trinity.GetViewPosition())
            autodof = min(dist1, dist2)
            dof = selfRef.settings.get('lens_focal_distance', autodof)
            print 'Auto-depth-of-field is at', autodof, ', actual focal distance is', dof
            selfRef.optix.SetFloat3('depthOfField', dof - trinity.GetFrontClip(), selfRef.settings['lens_radius'], 0)
        else:
            selfRef.framecount += 1
        selfRef.optix.SetUInt('frameIteration', selfRef.framecount)
        selfRef.oit.ResetAllocationCount()
        selfRef.rayCounter.ResetCount()
        time1 = time.time()
        selfRef.optix.Run(0, selfRef.width, selfRef.height)
        time2 = time.time()
        sec = time2 - time1
        raycount = selfRef.rayCounter.GetCount()
        raysec = 0
        if sec > 0:
            raysec = raycount / float(sec)
        time3 = time.time()
        if selfRef.framecount % 32 == 0:
            stop = time.time()
            print selfRef.oit.GetAllocationCount(), 'oit allocations'
            selfRef.blitfx.UpdateFrameCount(selfRef.framecount)
            selfRef.outputBuffer.CopyToTexture(selfRef.outputTexture)
            print 'time %05.3f / %05.3f / %05.3f / %05.3f msec' % (float(time1 - start) * 1000,
             float(time2 - time1) * 1000,
             float(time3 - time2) * 1000,
             float(stop - time3) * 1000),
            print '%d rays in %05.3f ms / %10d Krays/sec / %d rays per pixel' % (raycount,
             sec * 1000,
             raysec / 1000,
             selfRef.framecount)
    @telemetry.ZONE_METHOD
    def OnBeforeOptixPositionsUV(self):
        """Pre-render hook: swap meshes to the world-position-in-UV-space
        effect and restrict the scene's filter list to the skinned object.
        Original meshes are saved for OnAfterOptix to restore."""
        PD.SkinLightmapRenderer.DoChangeEffect('oxPosWorldUVEffect', self.oxMeshes)
        if self.skinnedObject is not None and self.skinnedObject.visualModel is not None:
            self.savedMeshes = self.skinnedObject.visualModel.meshes[:]
            filteredMeshes = [ ref.object for ref in self.oxMeshes.iterkeys() if ref.object is not None ]
            PD.SkinLightmapRenderer.DoCopyMeshesToVisual(self.skinnedObject, filteredMeshes)
        self.scene.filterList.removeAt(-1)
        self.scene.filterList.append(self.skinnedObject)
        self.scene.useFilterList = True
    @telemetry.ZONE_METHOD
    def OnBeforeOptixNormalsUV(self):
        """Pre-render hook: swap meshes to the world-normal-in-UV-space
        effect (mesh/filter setup was done by OnBeforeOptixPositionsUV)."""
        PD.SkinLightmapRenderer.DoChangeEffect('oxNormalWorldUVEffect', self.oxMeshes)
    def OnAfterOptix(self):
        """Post-render hook: restore the original shaders and mesh list and
        turn the scene filter list back off."""
        PD.SkinLightmapRenderer.DoRestoreShaders(meshes=self.oxMeshes)
        PD.SkinLightmapRenderer.DoCopyMeshesToVisual(self.skinnedObject, self.savedMeshes)
        del self.savedMeshes
        self.scene.useFilterList = False
        self.scene.filterList.removeAt(-1)
    def _InitUVUnwrap(self):
        """Bake the UV-space maps the raytracer needs: world positions,
        world normals (both 1024x1024) and a half-resolution stretch map,
        for every scattering mesh of the skinned object. Runs a one-shot
        render job and blocks until it finishes."""
        self.oxMeshes = {}
        self.scatterFX = set()
        self.unwrapSize = 1024
        posUV = PD.SkinLightmapRenderer.PreloadEffect(PD.SkinLightmapRenderer.OPTIX_POSWORLD_UV_EFFECT)
        normalUV = PD.SkinLightmapRenderer.PreloadEffect(PD.SkinLightmapRenderer.OPTIX_NORMALWORLD_UV_EFFECT)
        deriv = PD.SkinLightmapRenderer.PreloadEffect(PD.SkinLightmapRenderer.STRETCHMAP_RENDERER_EFFECT)
        self.oxDepth = trinity.Tr2DepthStencil(self.unwrapSize, self.unwrapSize, trinity.DEPTH_STENCIL_FORMAT.D24S8, 1, 0)
        # Collect meshes using subsurface scattering and prepare their
        # alternate (unwrap) effects; weak refs so meshes are not pinned.
        for mesh in self.skinnedObject.visualModel.meshes:
            if PD.SkinLightmapRenderer.IsScattering(mesh):
                m = PD.SkinLightmapRenderer.Mesh()
                m.ExtractOrigEffect(mesh)
                m.CreateOptixEffects(includeStretchMap=True)
                PD.AddWeakBlue(self, 'oxMeshes', mesh, m)
                fx = PD.GetEffectsFromMesh(mesh)
                for f in fx:
                    self.scatterFX.add(f)
        self.oxWorldPosMapUV = PD.SkinLightmapRenderer.CreateRenderTarget(self.unwrapSize, self.unwrapSize, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT, useRT=True)
        self.oxWorldNormalMapUV = PD.SkinLightmapRenderer.CreateRenderTarget(self.unwrapSize, self.unwrapSize, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT, useRT=True)
        self.stretchMap = PD.SkinLightmapRenderer.CreateRenderTarget(self.unwrapSize / 2, self.unwrapSize / 2, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT, useRT=True)
        rj = trinity.CreateRenderJob('Optix UV Unwrap')
        rj.PushRenderTarget(self.oxWorldPosMapUV)
        rj.PushDepthStencil(self.oxDepth)
        rj.Clear((0, 0, 0, 0), 1.0)
        rj.SetStdRndStates(trinity.RM_FULLSCREEN)
        vp = trinity.TriViewport()
        vp.x = 0
        vp.y = 0
        vp.width = self.unwrapSize
        vp.height = self.unwrapSize
        rj.SetViewport(vp)
        PD.SkinLightmapRenderer.AddCallback(self, FullOptixRenderer.OnBeforeOptixPositionsUV, 'onBeforeOptixPositionsUV', rj)
        rj.RenderScene(self.scene).name = 'Optix WorldPos (UV space)'
        PD.SkinLightmapRenderer.AddCallback(self, lambda weakSelf: PD.SkinLightmapRenderer.DoChangeEffect('oxNormalWorldUVEffect', meshes=weakSelf.oxMeshes), '', rj)
        rj.SetRenderTarget(self.oxWorldNormalMapUV)
        rj.Clear((0, 0, 0, 0), 1.0)
        rj.RenderScene(self.scene).name = 'Optix Normals (UV space)'
        # Stretch map renders at half resolution.
        rj.SetRenderTarget(self.stretchMap)
        rj.Clear((0, 0, 0, 0), 1.0)
        vp2 = trinity.TriViewport()
        vp2.x = 0
        vp2.y = 0
        vp2.width = self.unwrapSize / 2
        vp2.height = self.unwrapSize / 2
        rj.SetViewport(vp2)
        PD.SkinLightmapRenderer.AddCallback(self, lambda weakSelf: PD.SkinLightmapRenderer.DoChangeEffect('stretchmapRenderEffect', meshes=weakSelf.oxMeshes), '', rj)
        rj.RenderScene(self.scene).name = 'Stretchmap'
        PD.SkinLightmapRenderer.AddCallback(self, FullOptixRenderer.OnAfterOptix, 'onAfterOptix', rj)
        rj.PopRenderTarget()
        rj.PopDepthStencil()
        rj.ScheduleOnce()
        rj.WaitForFinish()
        # Debug-only map dumps; intentionally disabled.
        if False:
            PD.SkinLightmapRenderer.SaveTarget(self.oxWorldPosMapUV, 'c:/depot/oxworldposuv2.dds', isRT=True)
            PD.SkinLightmapRenderer.SaveTarget(self.oxWorldNormalMapUV, 'c:/depot/oxworldnormaluv2.dds', isRT=True)
            PD.SkinLightmapRenderer.SaveTarget(self.stretchMap, 'c:/depot/stretchmap2.dds', isRT=True)
            print '** MAPS SAVED **'
    def RunSkinningAndTesselation(self):
        """Run the GPU skinning (and, for detailed-hair effects, 1:4
        tesselation) kernels over every geometry batch, then point each
        OptiX geometry instance at the skinned output vertex buffer.

        Processed once per vertex stride (72 and 64 bytes); fur-shell
        batches are skipped. Entry points 0/1 are temporarily rebound to
        the skinning kernels and restored to the raygen program at the end.
        """
        print '*** Tesselation phase ***'
        batchTypes = self.skinnedOptix[0]
        optix = self.optix
        ptx = {}
        ptx[72] = self.path + 'eve_skinning_kernel72.ptx'
        ptx[64] = self.path + 'eve_skinning_kernel64.ptx'
        for bytes, ptxfile in ptx.iteritems():
            LogInfo('Processing ', bytes, 'bytes/vertex')
            skinningProgram = trinity.Tr2OptixProgram(ptxfile, 'kernel_no_tesselation')
            skinningProgramTesselate = trinity.Tr2OptixProgram(ptxfile, 'kernel_tesselation')
            optix.SetEntryPointCount(2)
            optix.SetRayGenerationProgram(0, skinningProgram)
            optix.SetRayGenerationProgram(1, skinningProgramTesselate)
            for batchType in range(len(batchTypes)):
                batches = batchTypes[batchType]
                out = []

                def needsTesselation(fx):
                    # Only the detailed hair shader gets tesselated.
                    return 'skinnedavatarhair_detailed.fx' in fx.effectFilePath.lower()

                # First pass: allocate one output vertex buffer per batch
                # (4x triangle count when tesselating); None for skipped
                # batches keeps indices aligned with `batches`.
                for batch in batches:
                    if 'furshell' in batch[1].effectFilePath.lower():
                        out.append(None)
                        continue
                    tesselate = needsTesselation(batch[1])
                    triangle_count = batch[6]
                    bytes_per_vertex = batch[8]
                    if bytes_per_vertex != bytes:
                        out.append(None)
                        continue
                    vertex_buffer_output = trinity.Tr2OptixBuffer()
                    vertex_buffer_output.CreateUserData(bytes_per_vertex, triangle_count * 3 * 4 if tesselate else triangle_count * 3, trinity.OPTIX_BUFFER_INPUT_OUTPUT, True)
                    out.append(vertex_buffer_output)

                # Second pass: launch the kernel per batch and rebind the
                # instance's vertex buffer to the skinned output.
                for i, batch in enumerate(batches):
                    if 'furshell' in batch[1].effectFilePath.lower():
                        continue
                    triangle_count = batch[6]
                    tesselate = needsTesselation(batch[1])
                    bytes_per_vertex = batch[8]
                    if bytes_per_vertex != bytes:
                        continue
                    if tesselate:
                        LogInfo('Tesselating geometry ', batch, ' of type ', batchType)
                    else:
                        LogInfo('Skinning geometry ', batch, ' of type ', batchType)
                    optix.SetBuffer('vertex_buffer', batch[2])
                    optix.SetBuffer('index_buffer', batch[3])
                    optix.SetBuffer('vertex_buffer_output', out[i])
                    optix.SetUInt('first_index_index', batch[5])
                    optix.SetBuffer('matrix_buffer', batch[7])
                    program = int(tesselate)
                    optix.Run(program, triangle_count, 1)
                    batch[0].SetBuffer('vertex_buffer', out[i])
                    if tesselate:
                        batch[0].SetPrimitiveCount(triangle_count * 4)

        # Restore both entry points to the regular raygen program.
        optix.SetRayGenerationProgram(0, self.raygen)
        optix.SetRayGenerationProgram(1, self.raygen)
    def RemoveBadGeometry(self, model):
        """Strip geometry the raytracer cannot handle from *model*: hide the
        beard decal area (remembered in self.haveBeard/self.beardFx so it can
        be raytraced procedurally) and drop 'eyeshadow_' transparent areas.
        The `if False:` sections are disabled debug filters kept for
        development."""
        self.haveBeard = False
        self.beardFx = None
        for mesh in model.visualModel.meshes:
            for area in mesh.decalAreas:
                if PD.IsBeard(area):
                    self.haveBeard = True
                    self.beardFx = area.effect
                    area.debugIsHidden = True
                    break

        for mesh in model.visualModel.meshes:
            for area in mesh.transparentAreas:
                lname = area.name.lower()
                if lname.startswith('eyeshadow_'):
                    mesh.transparentAreas.removeAt(-1)
                    break

        # Disabled debug path: strip everything except eye geometry.
        if False:
            for mesh in model.visualModel.meshes:
                for area in mesh.opaqueAreas:
                    lname = area.name.lower()
                    if 'eye' not in lname or 'eyewet' in lname or 'eyelash' in lname:
                        mesh.opaqueAreas.removeAt(-1)
                        break

                for area in mesh.transparentAreas:
                    lname = area.name.lower()
                    if 'eye' not in lname or 'eyewet' in lname or 'eyelash' in lname:
                        mesh.transparentAreas.removeAt(-1)
                        break

        # Disabled debug path: keep only hair meshes (with verbose dump).
        if False:
            print 'raytracing', len(model.visualModel.meshes), 'meshes'
            for mesh in model.visualModel.meshes:
                lname = mesh.name.lower()
                if not lname.startswith('hair'):
                    print 'removing', lname
                    mesh.opaqueAreas.removeAt(-1)
                    mesh.decalAreas.removeAt(-1)
                    mesh.transparentAreas.removeAt(-1)
                elif False:
                    print 'removing', lname
                    for a in mesh.opaqueAreas:
                        print 'opaque', a.name

                    for a in mesh.decalAreas:
                        print 'decal', a.name

                    for a in mesh.transparentAreas:
                        print 'transp', a.name

                    mesh.opaqueAreas.removeAt(-1)
                    mesh.decalAreas.removeAt(-1)
                    mesh.transparentAreas.removeAt(-1)
                else:
                    print 'keeping', lname
    def TransferBeardParameters(self, optix):
        """If a beard was found by RemoveBadGeometry, copy its shader
        parameters (renamed per the float/float3 maps below) plus the
        length/gravity settings into the OptiX context."""
        if self.haveBeard:
            LogInfo('Beard found')
            beardLength = self.settings['beardLength']
            optix.SetFloat3('beardOptions', beardLength[0], beardLength[1], self.settings['beardGravity'])
            # Effect parameter name -> OptiX variable name (scalars).
            floatMap = {'FurLength': 'beard_fur_length',
             'UVScale': 'beard_uv_scale',
             'AlphaMultiplier': 'beard_alpha_multiplier',
             'CombStrength': 'beard_comb_strength',
             'FurGrainRotation': 'beard_fur_grain_rotation',
             'MirrorGrain': 'beard_mirror_grain',
             'FurParallax': 'beard_fur_parallax'}
            # Effect parameter name -> OptiX variable name (float3s).
            float3Map = {'gravityOffset': 'beard_gravity_offset',
             'MaterialDiffuseColor': 'beard_diffuse_color'}
            for param in self.beardFx.parameters:
                optixName = floatMap.get(param.name, None)
                if optixName is not None:
                    optix.SetFloat(optixName, param.value)
                else:
                    optixName = float3Map.get(param.name, None)
                    if optixName is not None:
                        optix.SetFloat3(optixName, param.value[0], param.value[1], param.value[2])
def GenerateBeardGeometry(self, optix, path, any_hit_shadow):
    """Run the beard spline kernel on the GPU and wrap the result as geometry.

    Returns a Tr2OptixGeometryInstance with the bezier-curve geometry and
    the beard material bound, or None when the model has no beard.
    """
    if not self.haveBeard:
        return None
    LogInfo('generating beard splines')
    SkinRaytracingTools.SetOptixMatrixFromTrinity(optix, 'clipToWorld')
    kernel = trinity.Tr2OptixProgram(path + 'eve_beard_kernel.ptx', 'kernel')
    # One 80-byte spline record per launch cell of a gridSize x gridSize run.
    gridSize = 512
    splineBuffer = trinity.Tr2OptixBuffer()
    splineBuffer.CreateUserData(80, gridSize * gridSize, trinity.OPTIX_BUFFER_INPUT_OUTPUT, True)
    optix.SetBuffer('output', splineBuffer)
    # Temporarily reconfigure the pipeline to run the spline generator.
    savedRayTypes = optix.GetRayTypeCount()
    optix.SetRayTypeCount(1)
    optix.SetEntryPointCount(2)
    optix.SetRayGenerationProgram(0, kernel)
    optix.SetRayGenerationProgram(1, kernel)
    optix.SetEntryPointCount(1)
    LogInfo('beard: about to Run')
    optix.Run(0, gridSize, gridSize)
    LogInfo('beard: Run done')
    optix.SetRayTypeCount(savedRayTypes)
    # Wrap the generated curves in intersectable geometry.
    curveGeometry = trinity.Tr2OptixGeometry()
    curveGeometry.InitializeFromProgram(path + 'bezier_curves.ptx', 'intersect', 'bounds')
    subdivideDepth = 2
    curveGeometry.SetPrimitiveCount(gridSize * gridSize * (1 << subdivideDepth))
    optix.SetUInt('presubdivide_depth', subdivideDepth)
    optix.SetBuffer('curves', splineBuffer)
    LogInfo('beard: geometry setup done')
    shader = trinity.Tr2OptixProgram(path + 'eve_beard_shader.ptx', 'closest_hit_BeardShader')
    material = trinity.Tr2OptixMaterial()
    material.SetClosestHit(0, shader)
    material.SetAnyHit(1, any_hit_shadow)
    instance = trinity.Tr2OptixGeometryInstance()
    instance.SetGeometry(curveGeometry)
    instance.SetMaterial(material)
    LogInfo('beard: geometry instance setup done')
    return instance
def _DoInit(self, scene = None):
    """One-time setup of the OptiX character raytracer.

    Finds the Tr2IntSkinnedObject in the scene, builds all OptiX programs,
    materials and geometry for it, wires lights and samplers, compiles and
    validates the OptiX context, and schedules the recurring render job
    that progressively accumulates frames.

    scene: paperdoll scene; defaults to a new PD.SkinLightmapRenderer.Scene().
    """
    model = None
    if scene is None:
        scene = PD.SkinLightmapRenderer.Scene()
    self.scene = scene
    self.previousVP = trinity.TriMatrix()
    self.framecount = 1
    self.useOIT = True
    # NOTE(review): scene was just defaulted above, so this guard can only
    # fire if Scene() itself returned None.
    if scene is None:
        LogWarn('No scene!')
        return
    for dynamic in scene.dynamics:
        if dynamic.__typename__ == 'Tr2IntSkinnedObject':
            model = dynamic
            break
    else:
        LogWarn('No Tr2IntSkinnedObject found')
        return
    if model is None:
        LogWarn('No Tr2IntSkinnedObject found')
        return
    self.skinnedObject = model
    if self.skinnedObject.visualModel is None:
        LogWarn('skinnedObject has no visualMeshes')
        return
    # Output size defaults to the back buffer (or the swapchain RT if that
    # render step exists), overridable via settings.
    bg = trinity.renderContext.GetDefaultBackBuffer()
    step = trinity.renderJobs.FindStepByName('SET_SWAPCHAIN_RT')
    if step is not None:
        bg = step.renderTarget
    self.width = self.settings.get('outputWidth', bg.width)
    self.height = self.settings.get('outputHeight', bg.height)
    self.blitfx = FullScreenBlitter(self.width, self.height)
    self.RemoveBadGeometry(model)
    outputTexture = trinity.TriTextureRes(self.width, self.height, 1, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT)
    self.outputTexture = outputTexture
    self.capture = CaptureHelper(self.width, self.height)
    self._InitUVUnwrap()
    # Kill any previous instance of this render job before rebuilding.
    for steps in trinity.renderJobs.recurring:
        if steps.name == 'FullOptixRenderer':
            steps.UnscheduleRecurring()
    start = time.clock()
    optix = trinity.Tr2Optix()
    self.optix = optix
    optix.SetInteropDevice()
    optix.SetRayTypeCount(4)
    optix.SetEntryPointCount(1)
    # Disabled debug switches (exceptions + device-side printf).
    if False:
        optix.EnableAllExceptions()
        optix.SetPrintEnabled(True)
        optix.SetPrintBufferSize(16384)
    optix.SetUInt('radiance_ray_type', 0)
    optix.SetUInt('shadow_ray_type', 1)
    optix.SetUInt('translucency_ray_type', 2)
    # NOTE(review): 'translucency_ray_type' is set twice; this second call
    # overwrites the first with 3. With four ray types (main=0, shadow=1,
    # translucency=2, bounce=3) this looks like a typo for a bounce-ray
    # variable name — confirm against the kernels before changing.
    optix.SetUInt('translucency_ray_type', 3)
    optix.SetFloat('scene_epsilon', 0.001)
    optix.SetUInt('frameIteration', 0)
    self.outputBuffer = trinity.Tr2OptixBuffer()
    self.outputBuffer.CreateFloat4(self.width, self.height, trinity.OPTIX_BUFFER_INPUT_OUTPUT)
    optix.SetBuffer('output_buffer', self.outputBuffer)
    self.ApplySettings()
    path = str(blue.paths.ResolvePath('res:/graphics/effect/optix/NCC/'))
    self.path = path
    LogInfo('Getting files from', path)
    # 'everything' keeps Python references alive for all OptiX objects so
    # they are not garbage-collected while the context uses them.
    everything = []
    any_hit_shadow = trinity.Tr2OptixProgram(path + 'eve_shadow.ptx', 'any_hit_shadow')
    any_hit_shadow_blend = trinity.Tr2OptixProgram(path + 'eve_shadow.ptx', 'any_hit_shadow_blend')
    shader_diffuse_only_feeler = trinity.Tr2OptixProgram(path + 'eve_bounce.ptx', 'closest_hit_DiffuseOnlyFeeler2')
    any_hit_cutout = trinity.Tr2OptixProgram(path + 'eve_cutout.ptx', 'any_hit_CutoutMask')
    any_hit_diffuse_feeler_blend = trinity.Tr2OptixProgram(path + 'eve_shadow.ptx', 'any_hit_diffuse_feeler_blend')
    everything.append(any_hit_shadow)
    everything.append(any_hit_shadow_blend)
    everything.append(shader_diffuse_only_feeler)
    everything.append(any_hit_cutout)
    mainRay = 0
    shadowRay = 1
    bounceRay = 3

    # Build a material using 'shader' for radiance, the shared shadow
    # any-hit and the diffuse feeler for bounce rays.
    def MakeMaterialWithShader(shader):
        material = trinity.Tr2OptixMaterial()
        material.SetClosestHit(mainRay, shader)
        material.SetAnyHit(shadowRay, any_hit_shadow)
        material.SetClosestHit(bounceRay, shader_diffuse_only_feeler)
        everything.append(material)
        return (material, shader)

    # Load a closest-hit program from ptxFile and wrap it in a material.
    def MakeMaterial(ptxFile, shaderName):
        shader = trinity.Tr2OptixProgram(path + ptxFile + '.ptx', shaderName)
        everything.append(shader)
        return MakeMaterialWithShader(shader)

    # Convert a material to its decal variant (alpha-tested any-hits).
    def MakeDecal(material):
        material.SetAnyHit(mainRay, any_hit_cutout)
        material.SetAnyHit(shadowRay, any_hit_shadow_blend)
        material.SetAnyHit(bounceRay, any_hit_cutout)

    skin_single_material, skin_single_shade = MakeMaterial('eve_skin', 'closest_hit_ShadeSinglePassSkin_Single2')
    skin_single_material_scatter = MakeMaterial('eve_skin', 'closest_hit_ShadeSinglePassSkin_Single_Scatter2')[0]
    skin_single_material_decal = MakeMaterialWithShader(skin_single_shade)[0]
    MakeDecal(skin_single_material_decal)
    # Glasses use any-hit for both radiance and shadow (transparent surface).
    glasses_shade = trinity.Tr2OptixProgram(path + 'eve_glasses.ptx', 'glasses_shade')
    glasses_shadow = trinity.Tr2OptixProgram(path + 'eve_glasses.ptx', 'glasses_shadow')
    glass_material = trinity.Tr2OptixMaterial()
    glass_material.SetAnyHit(mainRay, glasses_shade)
    glass_material.SetAnyHit(shadowRay, glasses_shadow)
    glass_material.SetClosestHit(bounceRay, shader_diffuse_only_feeler)
    everything.append(glasses_shade)
    everything.append(glasses_shadow)
    vizNames = ['closest_hit_VizNormal',
     'closest_hit_VizUV',
     'closest_hit_VizConstantColor',
     'closest_hit_VizDiffuse']
    vizualizer, vizualizer_shade = MakeMaterial('eve_basic', vizNames[0])
    vizualizer_decal = MakeMaterialWithShader(vizualizer_shade)[0]
    MakeDecal(vizualizer_decal)
    skin_double_material, skin_double_shade = MakeMaterial('eve_skin', 'closest_hit_ShadeSinglePassSkin_Double2')
    skin_double_material_decal = MakeMaterialWithShader(skin_double_shade)[0]
    MakeDecal(skin_double_material_decal)
    skin_double_material_transparent = MakeMaterial('eve_skin', 'closest_hit_ShadeSinglePassSkin_Double2_Blend')[0]
    skin_double_material_transparent.SetAnyHit(mainRay, any_hit_cutout)
    skin_double_material_transparent.SetAnyHit(shadowRay, any_hit_shadow_blend)
    skin_double_material_transparent.SetAnyHit(bounceRay, any_hit_cutout)
    avatar_brdf_material, avatar_brdf_shade = MakeMaterial('eve_brdf', 'closest_hit_ShadeAvatarBRDF_Single2')
    avatar_brdf_material_decal = MakeMaterialWithShader(avatar_brdf_shade)[0]
    MakeDecal(avatar_brdf_material_decal)
    avatar_brdf_double_material, avatar_brdf_double_shade = MakeMaterial('eve_brdf', 'closest_hit_ShadeAvatarBRDF_Double2')
    avatar_brdf_double_material_decal = MakeMaterialWithShader(avatar_brdf_double_shade)[0]
    MakeDecal(avatar_brdf_double_material_decal)
    # Hair: optional order-independent transparency (OIT) path.
    avatar_hair_material = trinity.Tr2OptixMaterial()
    avatar_hair_shade = trinity.Tr2OptixProgram(path + 'eve_hair.ptx', 'closest_hit_ShadeAvatarHair2' if self.useOIT else 'closest_hit_ShadeAvatarHair2_Blend')
    avatar_hair_material.SetClosestHit(mainRay, avatar_hair_shade)
    if self.useOIT:
        avatar_hair_oit = trinity.Tr2OptixProgram(path + 'eve_hair.ptx', 'any_hit_HairOIT')
        avatar_hair_material.SetAnyHit(mainRay, avatar_hair_oit)
    avatar_hair_material.SetAnyHit(shadowRay, any_hit_shadow_blend)
    avatar_hair_material.SetClosestHit(bounceRay, shader_diffuse_only_feeler)
    everything.append(avatar_hair_shade)
    everything.append(avatar_hair_material)
    avatar_hair_material_decal = trinity.Tr2OptixMaterial()
    avatar_hair_material_decal.SetClosestHit(mainRay, avatar_hair_shade)
    avatar_hair_material_decal.SetAnyHit(mainRay, avatar_hair_oit if self.useOIT else any_hit_cutout)
    avatar_hair_material_decal.SetAnyHit(shadowRay, any_hit_shadow_blend)
    avatar_hair_material_decal.SetClosestHit(bounceRay, shader_diffuse_only_feeler)
    avatar_hair_material_decal.SetAnyHit(bounceRay, any_hit_cutout)
    everything.append(avatar_hair_material_decal)
    eye_shade = trinity.Tr2OptixProgram(path + 'eve_eyes.ptx', 'closest_hit_ShadeEye')
    eye_material = trinity.Tr2OptixMaterial()
    eye_material.SetClosestHit(mainRay, eye_shade)
    eye_material.SetAnyHit(shadowRay, any_hit_shadow)
    everything.append(eye_shade)
    everything.append(eye_material)
    eye_wetness_shade = trinity.Tr2OptixProgram(path + 'eve_eyes.ptx', 'closest_hit_ShadeEyeWetness')
    eye_wetness_material = trinity.Tr2OptixMaterial()
    eye_wetness_material.SetClosestHit(mainRay, eye_wetness_shade)
    eye_wetness_material.SetAnyHit(shadowRay, any_hit_shadow)
    everything.append(eye_wetness_shade)
    everything.append(eye_wetness_material)
    portrait_basic_material, portrait_basic_shade = MakeMaterial('eve_basic', 'closest_hit_ShadePortraitBasic')
    portrait_basic_material_decal = MakeMaterialWithShader(portrait_basic_shade)[0]
    MakeDecal(portrait_basic_material_decal)
    LogInfo('global setup OK', time.clock() - start, 'seconds')

    # Bind a trinity surface as an OptiX sampler without copying.
    def MakeSamplerFromMap(texture, name):
        sampler = trinity.Tr2OptixTextureSampler()
        sampler.CreateFromSurface(texture)
        sampler.SetNormalizedIndexingMode(True)
        optix.SetSampler(name, sampler)
        LogInfo('No-Copy Interop for ', name)
        everything.append(sampler)

    MakeSamplerFromMap(self.oxWorldPosMapUV, 'world_pos_uv_buffer')
    MakeSamplerFromMap(self.oxWorldNormalMapUV, 'world_normal_uv_buffer')
    MakeSamplerFromMap(self.stretchMap, 'stretchmap_buffer')
    useHdrProbe = False
    if useHdrProbe:
        optix.SetSamplerFromProbe('hdr_probe_sampler', 'c:/depot/optix/data/Japan_subway2_FINAL.hdr')
    start = time.clock()
    # Build OptiX geometry from the skinned model: 72-byte and 64-byte
    # vertex formats each get their own intersection program.
    self.skinnedOptix = optix.CreateFromSkinnedModel(model, 72, path + 'triangle72.ptx', 'mesh_intersect', 'mesh_bounds', 64, path + 'triangle64.ptx', 'mesh_intersect', 'mesh_bounds')
    optixBatches = self.skinnedOptix[0]
    self.TransferBeardParameters(optix)
    group = trinity.Tr2OptixGeometryGroup()
    groupChildren = []
    self.rayCounter = RayCountHelper(self.optix)
    self.oit = OitHelper(self.optix)
    self.raygen = trinity.Tr2OptixProgram(path + 'raygen.ptx', 'ray_request')
    self.RunSkinningAndTesselation()
    start = time.clock()
    samplers = SkinRaytracingTools.InteropAllTextures(optix, model, waitForFinish=True)
    everything.append(samplers)
    backdrop = trinity.TriTexture2DParameter()
    backdrop.resourcePath = self.settings['backgroundBitmap']
    skinmap = trinity.TriTexture2DParameter()
    skinmap.resourcePath = 'res:/Graphics/Character/female/paperdoll/head/head_generic/SkinMap.png'
    blue.resMan.Wait()
    everything.append(SkinRaytracingTools.InteropTexture('BackgroundEnvMap', backdrop.resource, waitForFinish=True, scope=optix))
    everything.append(SkinRaytracingTools.InteropTexture('SkinMap', skinmap.resource, waitForFinish=True, scope=optix))
    LogInfo('texture interop OK', time.clock() - start, 'seconds')
    splines = self.GenerateBeardGeometry(optix, path, any_hit_shadow)
    if splines is not None:
        groupChildren.append(splines)
    print '*** Raytracing phase ***'

    # Decal (1) and transparent (2) batches get different alpha thresholds.
    def SetAlphaRef(instance, batchType):
        if batchType == 1:
            instance.SetFloat4('alphaRef', 0.75, 0, 0, 0)
        elif batchType == 2:
            instance.SetFloat4('alphaRef', 0.01, 0, 0, 0)

    haveGlasses = False
    # Assign a material per batch, keyed off the trinity effect file path.
    # batchType: 0 = opaque, 1 = decal, 2 = transparent.
    for batchType in range(len(optixBatches)):
        isOpaque = batchType == 0
        batches = optixBatches[batchType]
        for batch in batches:
            if 'furshell' in batch[1].effectFilePath.lower():
                continue
            instance = trinity.Tr2OptixGeometryInstance()
            everything.append(instance)
            instance.SetGeometry(batch[0])
            # Random color used only by the constant-color debug visualizer.
            r = random.random()
            g = random.random()
            b = random.random()
            instance.SetFloat4('viz_constant_color', r, g, b, 1.0)
            fxpath = batch[1].effectFilePath.lower()
            if False:
                instance.SetMaterial(vizualizer if isOpaque else vizualizer_decal)
            elif 'glassshader' in fxpath:
                instance.SetMaterial(glass_material)
                if not haveGlasses:
                    haveGlasses = True
            elif 'skinnedavatarbrdfsinglepassskin_single.fx' in fxpath:
                if batch[1] in self.scatterFX:
                    instance.SetMaterial(skin_single_material_scatter)
                else:
                    instance.SetMaterial(skin_single_material if isOpaque else skin_single_material_decal)
                SetAlphaRef(instance, batchType)
            elif 'skinnedavatarbrdfsinglepassskin_double.fx' in fxpath:
                instance.SetMaterial([skin_double_material, skin_double_material_decal, skin_double_material_transparent][batchType])
                SetAlphaRef(instance, batchType)
            elif 'skinnedavatarbrdflinear.fx' in fxpath:
                instance.SetMaterial(avatar_brdf_material if isOpaque else avatar_brdf_material_decal)
            elif 'skinnedavatarbrdfdoublelinear.fx' in fxpath:
                instance.SetMaterial(avatar_brdf_double_material if isOpaque else avatar_brdf_double_material_decal)
            elif 'skinnedavatarhair_detailed.fx' in fxpath:
                instance.SetMaterial(avatar_hair_material if isOpaque else avatar_hair_material_decal)
                instance.SetFloat4('alphaRef', 0.01, 0, 0, 0)
                instance.SetUInt('enableCulling', 0)
            elif 'eyeshader.fx' in fxpath:
                instance.SetMaterial(eye_material)
            elif 'eyewetnessshader.fx' in fxpath:
                instance.SetMaterial(eye_wetness_material)
            elif 'portraitbasic.fx' in fxpath:
                instance.SetMaterial(portrait_basic_material if isOpaque else portrait_basic_material_decal)
            else:
                # Unknown effect: fall back to the debug visualizer material.
                instance.SetMaterial(vizualizer if isOpaque else vizualizer_decal)
            SkinRaytracingTools.CopyParametersToContext(batch[1], instance)
            groupChildren.append(instance)

    group.SetChildCount(len(groupChildren))
    for x in xrange(len(groupChildren)):
        group.SetChild(x, groupChildren[x])
    everything.append(group)
    group.SetAcceleration('Bvh', 'Bvh')
    LogInfo('scene interop OK', time.clock() - start, 'seconds')
    start = time.clock()
    bufEveLights = SkinRaytracingTools.CreateBufferForLights(scene.lights, useHdrProbe)
    optix.SetBuffer('trinity_lights', bufEveLights)
    LogInfo('lights interop OK', time.clock() - start, 'seconds')
    start = time.clock()
    optix.SetGeometryGroup('top_scene', group)
    optix.SetGeometryGroup('shadow_casters', group)
    optix.SetRayGenerationProgram(0, self.raygen)
    optix.SetEntryPointCount(1)
    miss = None
    if not useHdrProbe:
        miss = trinity.Tr2OptixProgram(path + 'eve_miss.ptx', 'miss')
    else:
        miss = trinity.Tr2OptixProgram(path + 'eve_miss_probe.ptx', 'miss')
    # Miss program is bound to ray type 3 (the bounce/feeler rays).
    optix.SetMissProgram(3, miss)
    optix.SetFloat3('bg_color', 1.0, 0, 0)
    everything.append(miss)
    # Disabled exception program hookup (debug only).
    if False:
        exception = trinity.Tr2OptixProgram(path + 'eve_miss.ptx', 'exception')
        optix.SetExceptionProgram(0, exception)
        everything.append(exception)
    optix.SetStackSize(4096)
    self.everything = everything
    SkinRaytracingTools.SetOptixMatrixFromTrinity(optix, 'clipToWorld', self.width / float(self.height))
    LogInfo('general setup OK', time.clock() - start, 'seconds')
    optix.ReportObjectCounts()
    start = time.clock()
    optix.Compile()
    LogInfo('compile OK', time.clock() - start, 'seconds')
    start = time.clock()
    optix.Validate()
    LogInfo('validate OK', time.clock() - start, 'seconds')
    start = time.clock()
    # First empty launch forces BVH construction.
    optix.Run(0, 0, 0)
    LogInfo('BVH OK', time.clock() - start, 'seconds')
    start = time.clock()
    self.blitfx.SetTexture(outputTexture, outputTexture, outputTexture)
    # Recurring render job: raytrace -> copy to texture -> blit fullscreen.
    rj = trinity.CreateRenderJob('FullOptixRenderer')
    rj.PushRenderTarget(self.outputRT)
    rj.PushDepthStencil(None)
    self.AddCallback(FullOptixRenderer.RaytraceFrame, 'Raytrace Frame', rj)
    rj.CopyRtToTexture(outputTexture).name = 'cuda -> outputTexture'
    rj.PopDepthStencil()
    rj.PopRenderTarget()
    rj.SetStdRndStates(trinity.RM_FULLSCREEN).name = 'fullscreen quad'
    rj.RenderEffect(self.blitfx.effect).name = ' blit'
    self.capture.CreateRenderSteps(rj, self.blitfx.effect)
    rj.steps.append(trinity.TriStepRenderFps())
    rj.ScheduleRecurring(insertFront=False)
    self.renderJob = rj
    LogInfo('final setup OK', time.clock() - start, 'seconds')
    # Hide the rasterized model; the raytraced image replaces it.
    model.display = False
    self.EnablePaperDollJobs(False)
@staticmethod
def EnablePaperDollJobs(enable):
    """Toggle the regular paperdoll rendering around raytracing sessions.

    The per-step job toggling is disabled below; only the device tick
    interval is switched (10 when restoring normal rendering, 0 while the
    raytracer runs).
    """
    # Disabled legacy path: enable/disable individual paperdoll job steps.
    if False:
        for job in trinity.renderJobs.recurring:
            if 'paperdollrenderjob' in job.name.lower():
                for step in job.steps:
                    step.enabled = enable
    # NOTE(review): indentation reconstructed — the tick-interval switch is
    # assumed to be live (outside the disabled block); confirm against the
    # original source.
    if enable:
        trinity.device.tickInterval = 10
    else:
        trinity.device.tickInterval = 0
def ApplySettings(self):
    """Upload the current self.settings values to their OptiX variables.

    Also resets the cached view-projection matrix so the next frame is
    treated as a camera change and accumulation restarts.
    """
    optix = self.optix
    settings = self.settings
    optix.SetFloat('light_size', settings['light_size'])
    optix.SetFloat3('depthOfField', 1.0, settings['lens_radius'], 0)
    optix.SetFloat('HairShadows', settings['HairShadows'])
    # EnvMapBoost is divided by pi before upload.
    optix.SetFloat('EnvMapBoost', settings['EnvMapBoost'] / 3.1415927)
    self.previousVP.Identity()
def SetLensRadius(self, lens_radius):
    """Set the depth-of-field aperture radius and push settings to OptiX."""
    self.settings.update(lens_radius=lens_radius)
    self.ApplySettings()
def SetLensFocalDistance(self, lens_focal_distance):
    """Set a fixed focal distance, or restore auto-focus when <= 0.

    A non-positive value removes the override so RaytraceFrame falls back
    to its automatic depth-of-field estimate.
    """
    if lens_focal_distance > 0:
        self.settings['lens_focal_distance'] = lens_focal_distance
    else:
        self.settings.pop('lens_focal_distance', 0)
    self.ApplySettings()
def SetLightSize(self, light_size):
    """Set the area-light size (soft-shadow radius) and re-apply settings."""
    self.settings.update(light_size=light_size)
    self.ApplySettings()
def SetHairShadowsEnabled(self, enabled):
    """Enable/disable hair shadow rays; stored as a float for the GPU."""
    self.settings.update(HairShadows=float(enabled))
    self.ApplySettings()
def SetBackgroundIntensity(self, intensity):
    """Set the environment-map brightness boost and re-apply settings."""
    self.settings.update(EnvMapBoost=intensity)
    self.ApplySettings()
def __init__(self, scene = None, backgroundBitmap = None, memento = None, beardLength = (0.01, 0.01), beardGravity = 0.0005, outputWidth = None, outputHeight = None, asyncSetup = True, listenForUpdate = True):
    """Create the character raytracer and kick off initialization.

    memento: settings dict from a previous instance; when given it takes
    precedence over the individual default settings (but explicit
    outputWidth/outputHeight still override it).
    asyncSetup: when True, _DoInit runs on a tasklet instead of inline.
    """
    LogInfo('init', self)
    # Free as much GPU memory as possible before OptiX allocates.
    blue.motherLode.maxMemUsage = 0
    blue.resMan.ClearAllCachedObjects()
    self.framecount = 0
    self.listenForUpdate = listenForUpdate
    if memento is not None:
        self.settings = memento
    else:
        defaultBackground = 'res:/texture/global/red_blue_ramp.dds'
        self.settings = {'light_size': 0.125,
         'lens_radius': 0.001,
         'HairShadows': 1.0,
         'EnvMapBoost': 1.0,
         'backgroundBitmap': backgroundBitmap if backgroundBitmap is not None else defaultBackground,
         'beardLength': beardLength,
         'beardGravity': beardGravity}
    if outputWidth is not None:
        self.settings['outputWidth'] = outputWidth
    if outputHeight is not None:
        self.settings['outputHeight'] = outputHeight
    if asyncSetup:
        uthread.new(self._DoInit, scene=scene)
    else:
        self._DoInit(scene=scene)
def GetMemento(self):
    """Return the live settings dict so a new renderer can be built from it."""
    return self.settings
def __del__(self):
    """Tear down the render job, release OptiX objects and restore rendering.

    NOTE(review): indentation reconstructed — the resource cleanup is
    assumed to run only when _DoInit completed (i.e. 'renderJob' exists);
    confirm against the original source.
    """
    LogInfo('deleting', self)
    if hasattr(self, 'renderJob'):
        self.renderJob.UnscheduleRecurring()
        self.renderJob = None
        # Drop the strong references so OptiX objects can be freed before
        # the leak report below.
        del self.raygen
        del self.rayCounter
        del self.oit
        del self.outputBuffer
        del self.skinnedOptix
        del self.everything
        LogInfo('Post-cleanup leak check:')
        self.optix.ReportObjectCounts()
    self.EnablePaperDollJobs(True)
@staticmethod
def Pause():
    """Stop the recurring raytrace job of the active instance, if any."""
    renderer = FullOptixRenderer.instance
    if renderer is not None:
        renderer.renderJob.UnscheduleRecurring()
@staticmethod
def NotifyUpdate():
    """Rebuild the active renderer with its current settings.

    Called when the scene changed; the old instance is dropped first so
    its __del__ releases GPU resources before the new one initializes.
    """
    renderer = FullOptixRenderer.instance
    if renderer is None or not renderer.listenForUpdate:
        return
    LogInfo('NotifyUpdate, restarting', renderer)
    memento = renderer.GetMemento()
    FullOptixRenderer.instance = None
    FullOptixRenderer.instance = FullOptixRenderer(memento=memento)
class ShipOptixRenderer():
    """OptiX-based raytracer for ship/station space scenes."""
    __guid__ = 'paperDoll.ShipOptixRenderer'
    # Active singleton instance, if any.
    instance = None
def AddCallback(self, func, name, rj):
    """Append a named python-callback step to render job 'rj'.

    The callback goes through a weakref so the render job does not keep
    this renderer alive.
    """
    step = trinity.TriStepPythonCB()
    step.name = name
    weakSelf = weakref.ref(self)
    step.SetCallback(lambda : SkinRaytracingTools.FuncWrapper(weakSelf, func))
    rj.steps.append(step)
def GetFrameCount(self):
    """Return the number of frames accumulated since the last camera move."""
    return self.framecount
def SaveScreenshot(self, filename):
    """Write the current capture surface to 'filename'."""
    self.capture.SaveSurfaceToFile(filename)
def AddRenderPreviewStep(self, renderJob):
    """Append steps to 'renderJob' that blit the raytraced output as a
    fullscreen quad with depth testing disabled."""
    renderJob.SetStdRndStates(trinity.RM_FULLSCREEN).name = ' [optix] fullscreen quad'
    renderJob.PushDepthStencil(None).name = ' [optix] push depth'
    renderJob.RenderEffect(self.blitfx.effect).name = ' [optix] Blit to screenshot'
    renderJob.PopDepthStencil().name = ' [optix] pop depth'
@staticmethod
def RaytraceFrame(selfRef):
    """Per-frame render-job callback: trace one frame and accumulate.

    'selfRef' is the renderer (passed explicitly because the job holds it
    via weakref, see AddCallback). When the view-projection matrix changed
    the accumulation buffer is cleared and depth-of-field is refocused;
    otherwise the frame counter advances and the result accumulates.
    """
    start = time.time()
    VP = SkinRaytracingTools.SetOptixMatrixFromTrinity(selfRef.optix, 'clipToWorld', selfRef.width / float(selfRef.height))
    if not SkinRaytracingTools.matEqual(VP, selfRef.previousVP):
        # Camera moved: restart progressive accumulation.
        selfRef.previousVP = VP
        selfRef.outputBuffer.Clear()
        selfRef.framecount = 0
        # NOTE(review): pos1 and pos2 are both the origin, so dist1 == dist2
        # below; these look like leftover auto-focus probe points.
        pos1 = (0, 0, 0)
        pos2 = pos1
        dist1 = geo2.Vec3Distance(pos1, trinity.GetViewPosition())
        dist2 = geo2.Vec3Distance(pos2, trinity.GetViewPosition())
        autodof = min(dist1, dist2)
        dof = selfRef.settings.get('lens_focal_distance', autodof)
        LogInfo('Auto-depth-of-field is at', autodof, ', actual focal distance is', dof)
        selfRef.optix.SetFloat3('depthOfField', dof - trinity.GetFrontClip(), selfRef.settings['lens_radius'], 0)
    else:
        selfRef.framecount += 1
    selfRef.optix.SetUInt('frameIteration', selfRef.framecount)
    selfRef.oit.ResetAllocationCount()
    selfRef.rayCounter.ResetCount()
    time1 = time.time()
    selfRef.optix.Run(0, selfRef.width, selfRef.height)
    time2 = time.time()
    traceTime = time2 - time1
    raycount = selfRef.rayCounter.GetCount()
    raysec = 0
    if traceTime > 0:
        raysec = raycount / float(traceTime)
    time3 = time.time()
    # NOTE(review): guard scope reconstructed — only the OIT allocation
    # report is assumed to be throttled to every 32nd frame; confirm.
    if selfRef.framecount % 32 == 0:
        oit = selfRef.oit.GetAllocationCount()
        if oit > 0:
            print oit, 'oit allocations'
    selfRef.blitfx.UpdateFrameCount(selfRef.framecount)
    selfRef.outputBuffer.CopyToTexture(selfRef.outputTexture)
    stop = time.time()
    message = 'time: call %05.3f / trace %05.3f / read %05.3f ms' % (float(time1 - start) * 1000, float(time2 - time1) * 1000, float(stop - time3) * 1000)
    message += '// traced %d rays in %05.3f ms / %10d Krays/sec / %d frames' % (raycount,
     traceTime * 1000,
     raysec / 1000,
     selfRef.framecount)
    LogInfo(message)
def ConvertCubeMapToSH(self, optix, ptxPath, cubeResPath):
    """Project a cube map into 9 spherical-harmonics coefficients on the GPU.

    Fills self.shBuffer (9 float4 entries, bound as 'sh_buffer') by running
    the 'cubemapsh.ptx' kernel over the cube map loaded from cubeResPath.
    """
    self.shBuffer = trinity.Tr2OptixBuffer()
    self.shBuffer.CreateFloat4(9, 1, trinity.OPTIX_BUFFER_INPUT_OUTPUT)
    optix.SetBuffer('sh_buffer', self.shBuffer)
    self.shBuffer.Clear()
    program = trinity.Tr2OptixProgram(ptxPath + 'cubemapsh.ptx', 'kernel')
    optix.SetRayGenerationProgram(0, program)
    optix.ReportObjectCounts()
    cube = trinity.TriTextureCubeParameter()
    cube.resourcePath = cubeResPath
    cube.name = 'Nebula'
    # Block until the cube resource is loaded.
    blue.resMan.Wait()
    mipmaps, names = SkinRaytracingTools.ConvertCubeToTextures(cube)
    # Bind one sampler per face texture (sampler name = cube.name + suffix).
    for i in range(len(names)):
        if i < len(mipmaps):
            sampler = trinity.Tr2OptixTextureSampler()
            sampler.CreateFromTexture(mipmaps[i])
            sampler.SetNormalizedIndexingMode(True)
            optix.SetSampler(cube.name + names[i], sampler)
            LogInfo('No-Copy Cube Side Interop for ' + cube.name + names[i])
    optix.Run(0, cube.resource.width, cube.resource.width)
    # Disabled debug dump of the nine SH coefficients (x, y, z per band).
    if False:
        names = ['Y00',
         'Y1m1',
         'Y10',
         'Y11',
         'Y2m2',
         'Y2m1',
         'Y20',
         'Y21',
         'Y22']
        self.shBuffer.Map()
        ofs = 0
        for name in names:
            print name, ': (',
            print self.shBuffer.GetUserDataF(ofs), ',',
            ofs = ofs + 4
            print self.shBuffer.GetUserDataF(ofs), ',',
            ofs = ofs + 4
            print self.shBuffer.GetUserDataF(ofs), ')'
            ofs = ofs + 4
        self.shBuffer.Unmap()
def CachedCreateMaterial(self, path, effect):
    """Return the OptiX material for a ship effect name, creating it on demand.

    path: resolved res folder holding the .ptx files.
    effect: lower-case effect file stem (e.g. 'tripleglowv3').
    Returns None for effects with no raytracing shader.

    Bug fix: the original looked materials up in self.materialCache but never
    stored anything into it, so every call rebuilt the program and material.
    The cache is now populated before returning.
    """
    material = self.materialCache.get(effect, None)
    if material is not None:
        return material
    shader = None
    if effect in ('tripleglowv3', 'doubleglowv3', 'singleglowv3'):
        shader = trinity.Tr2OptixProgram(path + 'v3ship_glow.ptx', 'closest_hit_' + effect)
    elif effect in ('singleheatv3',):
        shader = trinity.Tr2OptixProgram(path + 'v3ship_heat.ptx', 'closest_hit_' + effect)
    elif effect in ('tripleglowoilv3',):
        shader = trinity.Tr2OptixProgram(path + 'v3ship_glow_oil.ptx', 'closest_hit_' + effect)
    elif effect == 'skinned_tripleglowv3':
        # Skinned variant shares the unskinned glow closest-hit program.
        shader = trinity.Tr2OptixProgram(path + 'v3ship_glow.ptx', 'closest_hit_tripleglowv3')
    if shader is None:
        return None
    material = trinity.Tr2OptixMaterial()
    material.SetClosestHit(0, shader)
    material.SetAnyHit(1, self.any_hit_shadow)
    # Populate the cache so subsequent calls reuse this material.
    self.materialCache[effect] = material
    return material
def _DoInit(self, scene = None):
if scene is None:
scene = trinity.device.scene
self.scene = scene
self.previousVP = trinity.TriMatrix()
self.framecount = 1
self.materialCache = {}
self.useOIT = True
if scene is None:
LogWarn('No scene!')
return
bg = trinity.renderContext.GetDefaultBackBuffer()
step = trinity.renderJobs.FindStepByName('SET_SWAPCHAIN_RT')
if step is not None:
bg = step.renderTarget
self.width = self.settings.get('outputWidth', bg.width)
self.height = self.settings.get('outputHeight', bg.height)
self.blitfx = FullScreenBlitter(self.width, self.height)
bloomScale = 4
if False:
self.highpassRT = PD.SkinLightmapRenderer.CreateRenderTarget(self.width / bloomScale, self.height / bloomScale, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT)
self.filteredRT = PD.SkinLightmapRenderer.CreateRenderTarget(self.width / bloomScale, self.height / bloomScale, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT)
outputTexture = trinity.TriTextureRes(self.width, self.height, 1, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT)
self.outputTexture = outputTexture
self.capture = CaptureHelper(self.width, self.height)
for steps in trinity.renderJobs.recurring:
if steps.name == 'ShipOptixRenderer':
steps.UnscheduleRecurring()
path = str(blue.paths.ResolvePath('res:/graphics/effect/optix/ship/'))
self.path = path
LogInfo('Getting files from', path)
start = time.clock()
optix = trinity.Tr2Optix()
self.optix = optix
optix.SetInteropDevice()
optix.SetRayTypeCount(4)
optix.SetEntryPointCount(1)
if False:
optix.EnableAllExceptions()
if False:
optix.SetPrintEnabled(True)
optix.SetPrintBufferSize(16384)
optix.SetFloat('scene_epsilon', 0.01)
optix.SetUInt('frameIteration', 0)
nebula = PD.FindResourceByName(scene.backgroundEffect, 'NebulaMap') if scene.backgroundEffect is not None else None
if nebula is not None:
LogInfo('Converting to SH ', nebula.resourcePath)
self.ConvertCubeMapToSH(optix, path, nebula.resourcePath)
else:
self.shBuffer = trinity.Tr2OptixBuffer()
self.shBuffer.CreateFloat4(9, 1, trinity.OPTIX_BUFFER_INPUT_OUTPUT)
optix.SetBuffer('sh_buffer', self.shBuffer)
self.shBuffer.Clear()
self.outputBuffer = trinity.Tr2OptixBuffer()
self.outputBuffer.CreateFloat4(self.width, self.height, trinity.OPTIX_BUFFER_INPUT_OUTPUT)
optix.SetBuffer('output_buffer', self.outputBuffer)
self.ApplySettings()
everything = []
mainRay = 0
shadowRay = 1
bounceRay = 3
def MakeMaterialWithShader(shader):
return (material, shader)
def MakeMaterial(ptxFile, shaderName):
everything.append(shader)
return MakeMaterialWithShader(shader)
LogInfo('global setup OK', time.clock() - start, 'seconds')
useHdrProbe = False
start = time.clock()
self.rayCounter = RayCountHelper(self.optix)
self.oit = OitHelper(self.optix)
self.raygen = trinity.Tr2OptixProgram(path + 'raygen.ptx', 'ray_request')
shader = trinity.Tr2OptixProgram(path + 'vizualizer.ptx', 'closest_hit_VizGreen')
viz_material = trinity.Tr2OptixMaterial()
viz_material.SetClosestHit(0, shader)
everything.append(viz_material)
if False:
any_hit_shadow = trinity.Tr2OptixProgram(path + 'shadow.ptx', 'any_hit_shadow')
viz_material.SetAnyHit(1, any_hit_shadow)
self.any_hit_shadow = any_hit_shadow
else:
self.any_hit_shadow = None
start = time.clock()
nameTranslation = {'GlowNormalSpecularMap': 'NormalMap'}
def GroupByVertexBuffer(optixBatches):
output = []
for batchType in range(len(optixBatches)):
batches = optixBatches[batchType]
vbDict = {}
for batch in batches:
vb = batch[2]
list = vbDict.get(vb, None)
if list is not None:
list.append(batch)
else:
vbDict[vb] = [batch]
list = []
for vb in vbDict.iterkeys():
list.append(vbDict[vb])
output.append(list)
return output
cache = {}
programs = {'skinned_tripleglowv3_48': 'triangle48',
'singlev3_48': 'triangle48',
'singleheatv3_48': 'triangle48',
'tripleglowv3_40': 'triangle40',
'singleheatv3_40': 'triangle40',
'singlefresnelreflectionwithglow_56': 'triangle56',
'doublefresnelreflectionwithglow_56': 'triangle56',
'tripleglowoilv3_80': 'triangle80'}
if False:
nullintersect = trinity.Tr2OptixProgram(path + 'nullgeometry.ptx', 'intersect')
nullbounds = trinity.Tr2OptixProgram(path + 'nullgeometry.ptx', 'bounds')
everything.append(nullintersect)
everything.append(nullbounds)
mylogOK = set({})
mylogFail = set({})
linearNames = set({})
linearNames.add('MaterialDiffuseColor')
linearNames.add('MaterialReflectionColor')
linearNames.add('MaskDiffuseColor')
linearNames.add('MaskReflectionColor')
linearNames.add('SubMaskDiffuseColor')
linearNames.add('SubMaskReflectionColor')
linearNames.add('GlowColor')
topScene = trinity.Tr2OptixGroup()
interopSamplerCache = {}
for dynamic in scene.objects:
if dynamic.__typename__ not in ('EveShip2', 'EveStation2'):
continue
model = dynamic
if model.highDetailMesh is None or model.highDetailMesh.object is None:
LogWarn('ship has no high detail meshes')
continue
skinnedOptix = optix.CreateFromEveSpaceObject2(model, 0, '', '', '')
everything.append(skinnedOptix)
optixBatches = skinnedOptix[0]
self.objectsToRefresh[model] = skinnedOptix
sorted = GroupByVertexBuffer(optixBatches)
groups = []
for batchType in range(len(optixBatches)):
isOpaque = batchType == 0
vbBatches = sorted[batchType]
for batches in vbBatches:
groupChildren = []
for batch in batches:
effect = batch[1].effectFilePath.lower()
effect = effect[effect.rfind('/') + 1:]
effect = effect[:effect.rfind('.fx')]
ptx = programs.get(effect + '_' + str(batch[8]), '')
if ptx == '':
mylogFail.add(effect)
batch[0].SetIntersectProgram(nullintersect)
batch[0].SetBoundsProgram(nullbounds)
continue
mylogOK.add(effect)
intersect, bounds = cache.get(ptx, (None, None))
if intersect is None:
intersect = trinity.Tr2OptixProgram(path + ptx + '.ptx', 'intersect')
bounds = trinity.Tr2OptixProgram(path + ptx + '.ptx', 'bounds')
cache[ptx] = (intersect, bounds)
batch[0].SetIntersectProgram(intersect)
batch[0].SetBoundsProgram(bounds)
batchGeometryInstance = trinity.Tr2OptixGeometryInstance()
everything.append(batchGeometryInstance)
batchGeometryInstance.SetGeometry(batch[0])
if True:
material = self.CachedCreateMaterial(path, effect)
if material is None:
material = viz_material
else:
material = viz_material
batchGeometryInstance.SetMaterial(material)
SkinRaytracingTools.CopyParametersToContext(batch[1], batchGeometryInstance, linearNames)
groupChildren.append(batchGeometryInstance)
samplers = SkinRaytracingTools.InteropAllTexturesFromEffect(optix, batch[1], waitForFinish=True, nameTranslation=nameTranslation, scope=batchGeometryInstance, cache=interopSamplerCache)
everything.append(samplers)
group = trinity.Tr2OptixGeometryGroup()
group.SetChildCount(len(groupChildren))
for x in xrange(len(groupChildren)):
group.SetChild(x, groupChildren[x])
group.SetAcceleration('Bvh', 'Bvh')
self.objectsToMarkDirty.append(group)
groups.append(group)
everything.append(cache)
baseOffset = topScene.GetChildCount()
topScene.SetChildCount(baseOffset + len(groups))
for x in xrange(len(groups)):
topScene.SetChild(baseOffset + x, groups[x])
everything.append(groups)
if False:
sphereGeometry = trinity.Tr2OptixGeometry()
sphereGeometry.InitializeFromProgram(path + 'sphere_program.ptx', 'intersect', 'bounds')
sphereGeometry.SetPrimitiveCount(1)
everything.append(sphereGeometry)
sphereInstance = trinity.Tr2OptixGeometryInstance()
sphereInstance.SetGeometry(sphereGeometry)
sphereInstance.SetMaterial(viz_material)
sphereInstance.SetFloat4('pos_r', 0, 0, 0, 100)
sphereInstance.SetFloat4('color_watt', 1, 0, 0, 1)
everything.append(sphereInstance)
group = trinity.Tr2OptixGeometryGroup()
group.SetChildCount(1)
group.SetChild(0, sphereInstance)
group.SetAcceleration('Bvh', 'Bvh')
topScene.SetChildCount(topScene.GetChildCount() + 1)
topScene.SetChild(topScene.GetChildCount() - 1, group)
everything.append(topScene)
topScene.SetAcceleration('Bvh', 'Bvh')
self.objectsToMarkDirty.append(topScene)
optix.SetGroup('top_scene', topScene)
optix.SetGroup('shadow_casters', topScene)
if len(mylogOK) > 0:
LogInfo('Converted succesfully:', str(mylogOK))
else:
LogWarn('No effects converted succesfully!')
if len(mylogFail) > 0:
LogWarn('Failed to convert:', str(mylogFail))
if type(scene) == trinity.EveSpaceScene:
c = SkinRaytracingTools.SafeLinearize(scene.sunDiffuseColor)
optix.SetFloat4('SunDiffuseColor', c[0], c[1], c[2], c[3])
c = scene.sunDirection
optix.SetFloat4('SunDirWorld', -c[0], -c[1], -c[2], 0)
c = SkinRaytracingTools.SafeLinearize(scene.ambientColor)
optix.SetFloat4('SceneAmbientColor', c[0], c[1], c[2], c[3])
c = SkinRaytracingTools.SafeLinearize(scene.fogColor)
optix.SetFloat4('SceneFogColor', c[0], c[1], c[2], c[3])
LogInfo('scene interop OK', time.clock() - start, 'seconds')
start = time.clock()
light = trinity.Tr2InteriorLightSource()
if True:
wattage = 2000000
light.color = (1,
1,
1,
wattage)
light.radius = 50
light.position = (200, 500, -300)
else:
wattage = 10000000
light.color = (1,
1,
1,
wattage)
light.radius = 1000
light.position = (0, 0, 0)
bufEveLights = SkinRaytracingTools.CreateBufferForLights([], useHdrProbe, preserveAlpha=True)
optix.SetBuffer('trinity_lights', bufEveLights)
LogInfo('lights interop OK', time.clock() - start, 'seconds')
if False:
sphereGeometry = trinity.Tr2OptixGeometry()
sphereGeometry.InitializeFromProgram(path + 'sphere_program.ptx', 'intersect', 'bounds')
sphereGeometry.SetPrimitiveCount(1)
sphereMaterial = trinity.Tr2OptixMaterial()
sphereShader = trinity.Tr2OptixProgram(path + 'sphere_program.ptx', 'closest_hit_radiance')
sphereMaterial.SetClosestHit(0, sphereShader)
sphereInstance = trinity.Tr2OptixGeometryInstance()
sphereInstance.SetGeometry(sphereGeometry)
sphereInstance.SetMaterial(sphereMaterial)
sphereInstance.SetFloat4('pos_r', light.position[0], light.position[1], light.position[2], light.radius)
sphereInstance.SetFloat4('color_watt', light.color[0], light.color[1], light.color[2], light.color[3])
n = topScene.GetChildCount()
topScene.SetChildCount(n + 1)
sphereGroup = trinity.Tr2OptixGeometryGroup()
sphereGroup.SetChildCount(1)
sphereGroup.SetChild(0, sphereInstance)
sphereGroup.SetAcceleration('Bvh', 'Bvh')
topScene.SetChild(n, sphereGroup)
start = time.clock()
optix.SetRayGenerationProgram(0, self.raygen)
optix.SetEntryPointCount(1)
miss = None
if not useHdrProbe:
miss = trinity.Tr2OptixProgram(path + 'eve_miss.ptx', 'miss')
else:
miss = trinity.Tr2OptixProgram(path + 'eve_miss_probe.ptx', 'miss')
optix.SetMissProgram(3, miss)
optix.SetFloat3('bg_color', 1.0, 0, 0)
everything.append(miss)
if False:
exception = trinity.Tr2OptixProgram(path + 'eve_miss.ptx', 'exception')
optix.SetExceptionProgram(0, exception)
everything.append(exception)
optix.SetStackSize(4096)
self.everything = everything
SkinRaytracingTools.SetOptixMatrixFromTrinity(optix, 'clipToWorld', self.width / float(self.height))
LogInfo('general setup OK', time.clock() - start, 'seconds')
optix.ReportObjectCounts()
start = time.clock()
optix.Compile()
LogInfo('compile OK', time.clock() - start, 'seconds')
start = time.clock()
optix.Validate()
LogInfo('validate OK', time.clock() - start, 'seconds')
start = time.clock()
optix.Run(0, 0, 0)
LogInfo('BVH OK', time.clock() - start, 'seconds')
start = time.clock()
if False:
self.blitfx.SetTexture(outputTexture, self.highpassRT, self.filteredRT)
else:
self.blitfx.SetTexture(outputTexture, outputTexture, outputTexture)
rj = trinity.CreateRenderJob('ShipOptixRenderer')
self.AddCallback(ShipOptixRenderer.RaytraceFrame, 'Raytrace Frame', rj)
rj.SetStdRndStates(trinity.RM_FULLSCREEN).name = 'fullscreen state'
if False:
rj.SetRenderTarget(self.highpassRT.wrappedRenderTarget).name = ' SetRT highpassRT'
rj.RenderEffect(self.blitfx.highpassEffect).name = ' high pass'
rj.SetRenderTarget(self.filteredRT.wrappedRenderTarget).name = ' SetRT filteredRT'
rj.RenderEffect(self.blitfx.gaussianHorizEffect).name = ' horizontal blur'
rj.SetRenderTarget(self.highpassRT.wrappedRenderTarget).name = ' SetRT highpassRT'
rj.RenderEffect(self.blitfx.gaussianVertEffect).name = ' vertical blur'
rj.SetStdRndStates(trinity.RM_FULLSCREEN).name = 'fullscreen state'
rj.RenderEffect(self.blitfx.effect).name = ' blit'
tp2 = None
for job in trinity.renderJobs.recurring:
if job.name == 'TrinityPanel:View1':
tp2 = job
if tp2 is None:
rj.ScheduleRecurring(insertFront=False)
else:
final = None
for step in tp2.steps:
if step.name == 'SET_FINAL_RT':
final = step
break
if final is not None:
tp2.steps.insert(tp2.steps.index(final), trinity.TriStepRunJob(rj))
else:
tp2.steps.append(trinity.TriStepRunJob(rj))
self.renderJob = rj
LogInfo('final setup OK', time.clock() - start, 'seconds')
FullOptixRenderer.EnablePaperDollJobs(False)
def ApplySettings(self):
self.optix.SetFloat('light_size', self.settings['light_size'])
self.optix.SetFloat3('depthOfField', 1.0, self.settings['lens_radius'], 0)
self.optix.SetFloat('HairShadows', self.settings['HairShadows'])
self.optix.SetFloat('EnvMapBoost', self.settings['EnvMapBoost'] / 3.1415927)
self.previousVP.Identity()
def SetLensRadius(self, lens_radius):
self.settings['lens_radius'] = lens_radius
self.ApplySettings()
def SetLensFocalDistance(self, lens_focal_distance):
if lens_focal_distance <= 0:
self.settings.pop('lens_focal_distance', 0)
else:
self.settings['lens_focal_distance'] = lens_focal_distance
self.ApplySettings()
def SetLightSize(self, light_size):
self.settings['light_size'] = light_size
self.ApplySettings()
def SetHairShadowsEnabled(self, enabled):
self.settings['HairShadows'] = float(enabled)
self.ApplySettings()
def SetBackgroundIntensity(self, intensity):
self.settings['EnvMapBoost'] = intensity
self.ApplySettings()
def __init__(self, scene = None, backgroundBitmap = None, memento = None, beardLength = (0.01, 0.01), beardGravity = 0.0005, outputWidth = None, outputHeight = None, asyncSetup = True, listenForUpdate = True):
LogInfo('init', self)
blue.motherLode.maxMemUsage = 0
blue.resMan.ClearAllCachedObjects()
self.framecount = 0
self.listenForUpdate = listenForUpdate
self.everything = None
self.objectsToRefresh = {}
self.objectsToMarkDirty = []
if memento is not None:
self.settings = memento
else:
self.settings = {}
self.settings['light_size'] = 0.125
self.settings['lens_radius'] = 0.001
self.settings['HairShadows'] = 1.0
self.settings['EnvMapBoost'] = 1.0
self.settings['backgroundBitmap'] = backgroundBitmap if backgroundBitmap is not None else 'res:/texture/global/red_blue_ramp.dds'
self.settings['beardLength'] = beardLength
self.settings['beardGravity'] = beardGravity
if outputWidth is not None:
self.settings['outputWidth'] = outputWidth
if outputHeight is not None:
self.settings['outputHeight'] = outputHeight
if asyncSetup:
uthread.new(self._DoInit, scene=scene)
else:
self._DoInit(scene=scene)
def GetMemento(self):
return self.settings
def __del__(self):
LogInfo('deleting', self)
if hasattr(self, 'renderJob'):
self.renderJob.UnscheduleRecurring()
self.renderJob = None
del self.any_hit_shadow
del self.raygen
del self.rayCounter
del self.oit
del self.shBuffer
del self.outputBuffer
del self.everything
del self.objectsToRefresh
del self.objectsToMarkDirty
self.optix.ClearObjects()
LogInfo('Post-cleanup leak check:')
self.optix.ReportObjectCounts()
FullOptixRenderer.EnablePaperDollJobs(True)
def RefreshMatrices(self):
for ship, optixList in self.objectsToRefresh.iteritems():
self.optix.RefreshMatrices(ship, optixList)
for dirty in self.objectsToMarkDirty:
dirty.MarkDirty()
self.ApplySettings()
LogInfo('Refreshed')
@staticmethod
def Pause():
if FullOptixRenderer.instance is not None:
FullOptixRenderer.instance.renderJob.UnscheduleRecurring()
@staticmethod
def NotifyUpdate():
if FullOptixRenderer.instance is not None and FullOptixRenderer.instance.listenForUpdate:
LogInfo('NotifyUpdate, restarting', FullOptixRenderer.instance)
memento = FullOptixRenderer.instance.GetMemento()
FullOptixRenderer.instance = None
FullOptixRenderer.instance = FullOptixRenderer(memento=memento)
|
normal
|
{
"blob_id": "3c01ca27a5eef877b606b93b04ffe6f73168cd6b",
"index": 9090,
"step-1": "#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/paperDoll/SkinRaytracing.py\nimport trinity\nimport blue\nimport telemetry\nimport ctypes\nimport math\nimport time\nimport geo2\nimport struct\nimport itertools\nimport weakref\nimport uthread\nimport paperDoll as PD\nimport log\nimport random\nmylog = log.Channel('optix', 'python')\n\ndef LogInfo(text, *args):\n for arg in args:\n text += ' ' + str(arg)\n\n mylog.Log(text, log.LGINFO)\n\n\ndef LogWarn(text, *args):\n for arg in args:\n text = text + ' ' + str(arg)\n\n mylog.Log(text, log.LGWARN)\n\n\nclass SkinRaytracingTools():\n __guid__ = 'paperDoll.SkinRaytracingTools'\n\n @staticmethod\n def SetOptixMatrixFromTrinity(optix, matrixName, ratio = None):\n proj = trinity.TriProjection()\n view = trinity.TriView()\n view.transform = trinity.GetViewTransform()\n proj.PerspectiveFov(trinity.GetFieldOfView(), trinity.GetAspectRatio() if ratio is None else ratio, trinity.GetFrontClip(), trinity.GetBackClip())\n projToView = geo2.MatrixInverse(proj.transform)\n viewToWorld = geo2.MatrixInverse(view.transform)\n projToWorld = geo2.MatrixMultiply(projToView, viewToWorld)\n r0 = projToWorld[0]\n r1 = projToWorld[1]\n r2 = projToWorld[2]\n r3 = projToWorld[3]\n mat = trinity.TriMatrix(r0[0], r0[1], r0[2], r0[3], r1[0], r1[1], r1[2], r1[3], r2[0], r2[1], r2[2], r2[3], r3[0], r3[1], r3[2], r3[3])\n optix.SetMatrix4x4(matrixName, mat)\n r0 = view.transform[0]\n r1 = view.transform[1]\n r2 = view.transform[2]\n r3 = view.transform[3]\n mat = trinity.TriMatrix(r0[0], r0[1], r0[2], r0[3], r1[0], r1[1], r1[2], r1[3], r2[0], r2[1], r2[2], r2[3], r3[0], r3[1], r3[2], r3[3])\n optix.SetMatrix4x4('viewTransform', mat)\n return mat\n\n @staticmethod\n def CreateSamplerForTexture(name, map, waitForFinish):\n rt = trinity.Tr2RenderTarget(map.width, map.height, 1, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)\n job = trinity.CreateRenderJob()\n job.PushRenderTarget(rt)\n job.PushDepthStencil(None)\n 
job.SetStdRndStates(trinity.RM_FULLSCREEN)\n job.RenderTexture(map)\n job.PopDepthStencil()\n job.PopRenderTarget()\n job.ScheduleOnce()\n if waitForFinish:\n job.WaitForFinish()\n sampler = trinity.Tr2OptixTextureSampler()\n if True:\n res = trinity.TriTextureRes()\n res.CreateAndCopyFromRenderTarget(rt)\n sampler.CreateFromTexture(res)\n else:\n sampler.CreateFromRenderTarget(rt)\n sampler.SetNormalizedIndexingMode(True)\n if True:\n return (sampler, res)\n else:\n return (sampler, rt)\n\n @staticmethod\n def ConvertCubeToTextures(cube):\n names = ['PX',\n 'NX',\n 'PY',\n 'NY',\n 'PZ',\n 'NZ']\n viewVec = [(1, 0, 0),\n (-1, 0, 0),\n (0, 1, 0),\n (0, -1, 0),\n (0, 0, 1),\n (0, 0, -1)]\n upVec = [(0, 1, 0),\n (0, 1, 0),\n (0, 0, 1),\n (0, 0, -1),\n (0, 1, 0),\n (0, 1, 0)]\n spaceScene = trinity.EveSpaceScene()\n spaceScene.envMap1ResPath = str(cube.resourcePath)\n spaceScene.envMapScaling = (1, 1, -1)\n spaceScene.backgroundRenderingEnabled = True\n spaceScene.backgroundEffect = trinity.Load('res:/dx9/scene/starfield/bakeNebula.red')\n blue.resMan.Wait()\n node = PD.FindParameterByName(spaceScene.backgroundEffect, 'NebulaBrightness')\n if node is None:\n node = trinity.Tr2FloatParameter()\n node.name = 'NebulaBrightness'\n spaceScene.backgroundEffect.parameters.append(node)\n if node is not None:\n node.value = 100\n node = PD.FindResourceByName(spaceScene.backgroundEffect, 'NebulaMap')\n if node is None:\n node = trinity.TriTexture2DParam()\n node.name = 'NebulaMap'\n spaceScene.backgroundEffect.resources.append(node)\n node.SetResource(cube.resource)\n blue.resMan.Wait()\n mipmapped = []\n useTexture = True\n for i in xrange(len(names)):\n name = names[i]\n rt = PD.SkinLightmapRenderer.CreateRenderTarget(cube.resource.width, cube.resource.height, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM, useRT=True)\n job = trinity.CreateRenderJob(name=name)\n job.PushRenderTarget(rt)\n job.PushDepthStencil(None)\n job.Clear([(1, 0, 0),\n (0.2, 0, 0),\n (0, 1, 0),\n (0, 0.2, 0),\n (0, 
0, 1),\n (0, 0, 0.2)][i], None)\n proj = trinity.TriProjection()\n proj.PerspectiveFov(math.pi * 0.5, 1, 0.1, 1000)\n view = trinity.TriView()\n view.SetLookAtPosition((0, 0, 0), viewVec[i], upVec[i])\n viewport = trinity.TriViewport(0, 0, cube.resource.width, cube.resource.height, 0.0, 1.0)\n job.SetView(view)\n job.SetProjection(proj)\n job.SetViewport(viewport)\n job.Update(spaceScene)\n job.RenderScene(spaceScene)\n job.PopDepthStencil()\n job.PopRenderTarget()\n if useTexture:\n tex = trinity.TriTextureRes(cube.resource.width, cube.resource.height, 1, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)\n if True:\n job.ScheduleOnce()\n job.WaitForFinish()\n if useTexture:\n mipmapped.append(tex)\n else:\n mipmapped.append(rt)\n else:\n job.ScheduleRecurring()\n\n return (mipmapped, names)\n\n @staticmethod\n def FindAllTextureResourcesFromEffect(effect, scope):\n textures = {}\n samplers = []\n cubemaps = []\n if effect is not None:\n for r in effect.resources:\n if type(r) == trinity.TriTexture2DParameter and r.resource is not None:\n textures[r.name] = r.resource\n elif type(r) == trinity.TriTextureCubeParameter and r.resource is not None:\n if r.name in cubemaps:\n continue\n LogInfo('', r.name, ': Converting to individual textures')\n cubemaps.append(r.name)\n mipmaps, names = SkinRaytracingTools.ConvertCubeToTextures(r)\n for i in range(len(names)):\n if i < len(mipmaps):\n sampler = trinity.Tr2OptixTextureSampler()\n sampler.CreateFromTexture(mipmaps[i])\n sampler.SetNormalizedIndexingMode(True)\n scope.SetSampler(r.name + names[i], sampler)\n LogInfo('No-Copy Cube Side Interop for ' + r.name + names[i])\n samplers.append(mipmaps[i])\n samplers.append(sampler)\n\n return (textures, samplers)\n\n @staticmethod\n def FindAllTextureResources(dynamic, scope):\n textures = {}\n samplers = []\n cubemaps = []\n\n def ProcessMesh(mesh):\n for area in itertools.chain(mesh.opaqueAreas, mesh.decalAreas, mesh.transparentAreas):\n newTextures, newSamplers = 
SkinRaytracingTools.FindAllTextureResourcesFromEffect(area.effect, scope)\n textures.update(newTextures)\n samplers.extend(newSamplers)\n\n if type(dynamic) == trinity.Tr2IntSkinnedObject:\n for mesh in dynamic.visualModel.meshes:\n ProcessMesh(mesh)\n\n elif type(dynamic) == trinity.EveShip2:\n ProcessMesh(dynamic.highDetailMesh.object)\n elif type(dynamic) == trinity.EveStation2:\n ProcessMesh(dynamic.highDetailMesh.object)\n return (textures, samplers)\n\n @staticmethod\n def InteropTexture(name, texture, waitForFinish, scope):\n if texture.format == trinity.PIXEL_FORMAT.B8G8R8A8_UNORM:\n sampler = trinity.Tr2OptixTextureSampler()\n sampler.CreateFromTexture(texture)\n sampler.SetNormalizedIndexingMode(True)\n scope.SetSampler(name, sampler)\n LogInfo('No-Copy Interop for', name)\n return (sampler, None)\n if texture.type == trinity.TRIRTYPE_CUBETEXTURE:\n LogInfo('Copy-Interop for cubes not supported, skipping', name)\n return\n sampler_rt = SkinRaytracingTools.CreateSamplerForTexture(name, texture, waitForFinish)\n if sampler_rt is None or len(sampler_rt) < 1:\n LogInfo('InteropTexture failed for', name)\n else:\n scope.SetSampler(name, sampler_rt[0])\n LogInfo('Interop for', name)\n return sampler_rt\n\n @staticmethod\n def InteropAllTexturesFromEffect(optix, effect, waitForFinish, nameTranslation = None, scope = None, cache = None):\n if scope is None:\n scope = optix\n textures, samplers = SkinRaytracingTools.FindAllTextureResourcesFromEffect(effect, scope)\n for name, texture in textures.iteritems():\n if 'spotlight' in name.lower():\n continue\n if nameTranslation is not None:\n name = nameTranslation.get(name, name)\n if cache is not None and texture in cache:\n sampler = cache[texture]\n scope.SetSampler(name, sampler[0])\n LogInfo('Interop cache for', name)\n else:\n sampler = SkinRaytracingTools.InteropTexture(name, texture, waitForFinish, scope)\n if sampler and cache is not None:\n cache[texture] = sampler\n if sampler is not None:\n 
samplers.append(sampler)\n\n return samplers\n\n @staticmethod\n def InteropAllTextures(optix, dynamic, waitForFinish, nameTranslation = None, scope = None):\n if scope is None:\n scope = optix\n textures, samplers = SkinRaytracingTools.FindAllTextureResources(dynamic, scope)\n for name, texture in textures.iteritems():\n if 'spotlight' in name.lower():\n continue\n if nameTranslation is not None:\n name = nameTranslation.get(name, name)\n sampler = SkinRaytracingTools.InteropTexture(name, texture, waitForFinish, scope)\n if sampler is not None:\n samplers.append(sampler)\n\n return samplers\n\n @staticmethod\n def SafeLinearize(values):\n peak = max(1, max(values[0], max(values[1], values[2])))\n return (peak * math.pow(values[0] / peak, 2.2),\n peak * math.pow(values[1] / peak, 2.2),\n peak * math.pow(values[2] / peak, 2.2),\n values[3])\n\n @staticmethod\n def CopyParametersToContext(effect, instance, linearNames = None):\n for p in effect.parameters:\n if type(p) is trinity.Tr2Vector4Parameter:\n value = SkinRaytracingTools.SafeLinearize(p.value) if linearNames is not None and p.name in linearNames else p.value\n instance.SetFloat4(p.name, value[0], value[1], value[2], value[3])\n elif type(p) is trinity.TriFloatParameter or type(p) is trinity.Tr2FloatParameter:\n instance.SetFloat4(p.name, p.value, 0, 0, 0)\n\n @staticmethod\n def CreateBufferForLights(lights, leaveEmpty = False, preserveAlpha = False):\n bufEveLights = trinity.Tr2OptixBuffer()\n bufEveLights.CreateUserData(64, len(lights), trinity.OPTIX_BUFFER_OUTPUT, False)\n bufEveLights.MapUser()\n buffer = ''\n if leaveEmpty:\n lights = []\n for light in lights:\n innerAngle = light.coneAlphaInner\n outerAngle = light.coneAlphaOuter\n if innerAngle + 1.0 > outerAngle:\n innerAngle = outerAngle - 1.0\n innerAngle = math.cos(innerAngle * 3.1415927 / 180.0)\n outerAngle = math.cos(outerAngle * 3.1415927 / 180.0)\n coneDir = geo2.Vec3Normalize((light.coneDirection[0], light.coneDirection[1], 
light.coneDirection[2]))\n import struct\n buffer += struct.pack('16f', light.position[0], light.position[1], light.position[2], light.radius, math.pow(light.color[0], 2.2), math.pow(light.color[1], 2.2), math.pow(light.color[2], 2.2), light.falloff if not preserveAlpha else light.color[3], coneDir[0], coneDir[1], coneDir[2], outerAngle, innerAngle, 0, 0, 0)\n\n bufEveLights.SetUserDataFromStruct(buffer)\n bufEveLights.UnmapUser()\n return bufEveLights\n\n @staticmethod\n def CreateUInt1Buffer(optix, name):\n buffer = trinity.Tr2OptixBuffer()\n buffer.CreateUInt1(1, 1, trinity.OPTIX_BUFFER_INPUT_OUTPUT)\n buffer.Map()\n buffer.SetUserDataI(0, 0)\n buffer.Unmap()\n optix.SetBuffer(name, buffer)\n return buffer\n\n @staticmethod\n def matEqual(m1, m2):\n return m1._11 == m2._11 and m1._12 == m2._12 and m1._13 == m2._13 and m1._14 == m2._14 and m1._21 == m2._21 and m1._22 == m2._22 and m1._23 == m2._23 and m1._24 == m2._24 and m1._31 == m2._31 and m1._32 == m2._32 and m1._33 == m2._33 and m1._34 == m2._34 and m1._41 == m2._41 and m1._42 == m2._42 and m1._43 == m2._43 and m1._44 == m2._44\n\n @staticmethod\n def FuncWrapper(weakSelf, func):\n if weakSelf():\n func(weakSelf())\n\n\nclass OitHelper():\n\n def __init__(self, optix):\n self.oitAllocatorBuffer = SkinRaytracingTools.CreateUInt1Buffer(optix, 'oit_allocator')\n oitPoolBuffer = trinity.Tr2OptixBuffer()\n oitPoolBuffer.CreateUserData(64 + 112, 1048576, trinity.OPTIX_BUFFER_INPUT_OUTPUT, True)\n optix.SetBuffer('oit_pool', oitPoolBuffer)\n self.oitPoolBuffer = oitPoolBuffer\n\n def ResetAllocationCount(self):\n self.oitAllocatorBuffer.Map()\n self.oitAllocatorBuffer.SetUserDataI(0, 0)\n self.oitAllocatorBuffer.Unmap()\n\n def GetAllocationCount(self):\n self.oitAllocatorBuffer.Map()\n count = self.oitAllocatorBuffer.GetUserDataI(0)\n self.oitAllocatorBuffer.Unmap()\n return count\n\n\nclass RayCountHelper():\n\n def __init__(self, optix):\n self.rayCountBuffer = SkinRaytracingTools.CreateUInt1Buffer(optix, 
'ray_count')\n\n def ResetCount(self):\n self.rayCountBuffer.Map()\n self.rayCountBuffer.SetUserDataI(0, 0)\n self.rayCountBuffer.Unmap()\n\n def GetCount(self):\n self.rayCountBuffer.Map()\n count = self.rayCountBuffer.GetUserDataI(0)\n self.rayCountBuffer.Unmap()\n return count\n\n\nclass CaptureHelper():\n\n def __init__(self, width, height):\n self.capture = trinity.Tr2RenderTarget(width, height, 1, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)\n\n def SaveSurfaceToFile(self, filename):\n trinity.SaveRenderTarget(filename, self.capture)\n LogInfo('Saved to', filename)\n\n def CreateRenderSteps(self, rj, blitfx):\n rj.PushRenderTarget(self.capture).name = 'Begin screenshot capture'\n rj.PushDepthStencil(None).name = ' push depth'\n rj.RenderEffect(blitfx).name = ' Blit to screenshot'\n rj.PopDepthStencil().name = ' pop depth'\n rj.PopRenderTarget().name = 'End screenshot capture'\n\n\nclass FullScreenBlitter():\n\n def __init__(self, width, height):\n self.effect = trinity.Tr2Effect()\n self.effect.effectFilePath = 'res:/graphics/effect/optix/shaders/gammaBlit.fx'\n if self.effect.effectResource is None:\n LogWarn('Failed to load effect 1')\n return\n self.highpassEffect = trinity.Tr2Effect()\n self.highpassEffect.effectFilePath = 'res:/graphics/effect/optix/shaders/highpassFilter.fx'\n if self.highpassEffect.effectResource is None:\n LogWarn('Failed to load effect 1')\n return\n self.gaussianHorizEffect = trinity.Tr2Effect()\n self.gaussianHorizEffect.effectFilePath = 'res:/graphics/effect/optix/shaders/gaussianBlur.fx'\n if self.gaussianHorizEffect.effectResource is None:\n LogWarn('Failed to load effect 3')\n return\n self.gaussianVertEffect = trinity.Tr2Effect()\n self.gaussianVertEffect.effectFilePath = 'res:/graphics/effect/optix/shaders/gaussianBlur.fx'\n if self.gaussianVertEffect.effectResource is None:\n LogWarn('Failed to load effect 3')\n return\n for effect in [self.effect,\n self.highpassEffect,\n self.gaussianHorizEffect,\n self.gaussianVertEffect]:\n 
while effect.effectResource.isLoading:\n PD.Yield()\n\n self.blitcolor = trinity.Tr2Vector4Parameter()\n self.blitcolor.name = 'Color'\n for effect in [self.effect,\n self.highpassEffect,\n self.gaussianHorizEffect,\n self.gaussianVertEffect]:\n effect.PopulateParameters()\n effect.RebuildCachedData()\n effect.parameters.append(self.blitcolor)\n\n sizesParam = trinity.Tr2Vector4Parameter()\n sizesParam.name = 'InvSize'\n sizesParam.value = (1.0 / width,\n 1.0 / height,\n 0,\n 0)\n for effect in [self.effect, self.highpassEffect]:\n effect.parameters.append(sizesParam)\n\n sizesHorizParam = trinity.Tr2Vector4Parameter()\n sizesHorizParam.name = 'invTexelSize'\n sizesHorizParam.value = (1.0 / width,\n 0.0,\n 0,\n 0)\n self.gaussianHorizEffect.parameters.append(sizesHorizParam)\n sizesVertParam = trinity.Tr2Vector4Parameter()\n sizesVertParam.name = 'invTexelSize'\n sizesVertParam.value = (0.0,\n 1.0 / height,\n 0,\n 0)\n self.gaussianVertEffect.parameters.append(sizesVertParam)\n\n def SetTexture(self, optixOutputTexture, highpassTexture, filteredTexture):\n tex = trinity.TriTexture2DParameter()\n tex.name = 'Texture'\n tex.SetResource(optixOutputTexture)\n for effect in [self.effect, self.highpassEffect]:\n effect.resources.append(tex)\n\n tex = trinity.TriTexture2DParameter()\n tex.name = 'Texture'\n tex.SetResource(highpassTexture)\n self.gaussianHorizEffect.resources.append(tex)\n tex = trinity.TriTexture2DParameter()\n tex.name = 'Texture'\n tex.SetResource(filteredTexture)\n self.gaussianVertEffect.resources.append(tex)\n tex = trinity.TriTexture2DParameter()\n tex.name = 'BloomTexture'\n tex.SetResource(highpassTexture)\n self.effect.resources.append(tex)\n\n def UpdateFrameCount(self, framecount):\n invFC = 1.0 / framecount if framecount > 0 else 1.0\n self.blitcolor.value = (invFC,\n invFC,\n invFC,\n invFC)\n\n\nclass FullOptixRenderer():\n __guid__ = 'paperDoll.FullOptixRenderer'\n instance = None\n\n def AddCallback(self, func, name, rj):\n cb = 
trinity.TriStepPythonCB()\n weakSelf = weakref.ref(self)\n cb.SetCallback(lambda : SkinRaytracingTools.FuncWrapper(weakSelf, func))\n cb.name = name\n rj.steps.append(cb)\n\n def GetFrameCount(self):\n return self.framecount\n\n def SaveScreenshot(self, filename):\n self.capture.SaveSurfaceToFile(filename)\n\n def AddRenderPreviewStep(self, renderJob):\n renderJob.SetStdRndStates(trinity.RM_FULLSCREEN).name = ' [optix] fullscreen quad'\n renderJob.PushDepthStencil(None).name = ' [optix] push depth'\n renderJob.RenderEffect(self.blitfx.effect).name = ' [optix] Blit to screenshot'\n renderJob.PopDepthStencil().name = ' [optix] pop depth'\n\n def RefreshMatrices(self):\n model = self.skinnedObject\n self.optix.RefreshMatrices(model, self.skinnedOptix)\n self.RunSkinningAndTesselation()\n self.ApplySettings()\n print 'Refreshed'\n\n @staticmethod\n def RaytraceFrame(selfRef):\n start = time.time()\n VP = SkinRaytracingTools.SetOptixMatrixFromTrinity(selfRef.optix, 'clipToWorld', selfRef.width / float(selfRef.height))\n if not SkinRaytracingTools.matEqual(VP, selfRef.previousVP):\n selfRef.previousVP = VP\n selfRef.outputBuffer.Clear()\n selfRef.framecount = 0\n model = selfRef.skinnedObject\n pos1 = model.GetBonePosition(model.GetBoneIndex('fj_eyeballLeft'))\n pos2 = model.GetBonePosition(model.GetBoneIndex('fj_eyeballRight'))\n dist1 = geo2.Vec3Distance(pos1, trinity.GetViewPosition())\n dist2 = geo2.Vec3Distance(pos2, trinity.GetViewPosition())\n autodof = min(dist1, dist2)\n dof = selfRef.settings.get('lens_focal_distance', autodof)\n print 'Auto-depth-of-field is at', autodof, ', actual focal distance is', dof\n selfRef.optix.SetFloat3('depthOfField', dof - trinity.GetFrontClip(), selfRef.settings['lens_radius'], 0)\n else:\n selfRef.framecount += 1\n selfRef.optix.SetUInt('frameIteration', selfRef.framecount)\n selfRef.oit.ResetAllocationCount()\n selfRef.rayCounter.ResetCount()\n time1 = time.time()\n selfRef.optix.Run(0, selfRef.width, selfRef.height)\n time2 = 
time.time()\n sec = time2 - time1\n raycount = selfRef.rayCounter.GetCount()\n raysec = 0\n if sec > 0:\n raysec = raycount / float(sec)\n time3 = time.time()\n if selfRef.framecount % 32 == 0:\n stop = time.time()\n print selfRef.oit.GetAllocationCount(), 'oit allocations'\n selfRef.blitfx.UpdateFrameCount(selfRef.framecount)\n selfRef.outputBuffer.CopyToTexture(selfRef.outputTexture)\n print 'time %05.3f / %05.3f / %05.3f / %05.3f msec' % (float(time1 - start) * 1000,\n float(time2 - time1) * 1000,\n float(time3 - time2) * 1000,\n float(stop - time3) * 1000),\n print '%d rays in %05.3f ms / %10d Krays/sec / %d rays per pixel' % (raycount,\n sec * 1000,\n raysec / 1000,\n selfRef.framecount)\n\n @telemetry.ZONE_METHOD\n def OnBeforeOptixPositionsUV(self):\n PD.SkinLightmapRenderer.DoChangeEffect('oxPosWorldUVEffect', self.oxMeshes)\n if self.skinnedObject is not None and self.skinnedObject.visualModel is not None:\n self.savedMeshes = self.skinnedObject.visualModel.meshes[:]\n filteredMeshes = [ ref.object for ref in self.oxMeshes.iterkeys() if ref.object is not None ]\n PD.SkinLightmapRenderer.DoCopyMeshesToVisual(self.skinnedObject, filteredMeshes)\n self.scene.filterList.removeAt(-1)\n self.scene.filterList.append(self.skinnedObject)\n self.scene.useFilterList = True\n\n @telemetry.ZONE_METHOD\n def OnBeforeOptixNormalsUV(self):\n PD.SkinLightmapRenderer.DoChangeEffect('oxNormalWorldUVEffect', self.oxMeshes)\n\n def OnAfterOptix(self):\n PD.SkinLightmapRenderer.DoRestoreShaders(meshes=self.oxMeshes)\n PD.SkinLightmapRenderer.DoCopyMeshesToVisual(self.skinnedObject, self.savedMeshes)\n del self.savedMeshes\n self.scene.useFilterList = False\n self.scene.filterList.removeAt(-1)\n\n def _InitUVUnwrap(self):\n self.oxMeshes = {}\n self.scatterFX = set()\n self.unwrapSize = 1024\n posUV = PD.SkinLightmapRenderer.PreloadEffect(PD.SkinLightmapRenderer.OPTIX_POSWORLD_UV_EFFECT)\n normalUV = 
PD.SkinLightmapRenderer.PreloadEffect(PD.SkinLightmapRenderer.OPTIX_NORMALWORLD_UV_EFFECT)\n deriv = PD.SkinLightmapRenderer.PreloadEffect(PD.SkinLightmapRenderer.STRETCHMAP_RENDERER_EFFECT)\n self.oxDepth = trinity.Tr2DepthStencil(self.unwrapSize, self.unwrapSize, trinity.DEPTH_STENCIL_FORMAT.D24S8, 1, 0)\n for mesh in self.skinnedObject.visualModel.meshes:\n if PD.SkinLightmapRenderer.IsScattering(mesh):\n m = PD.SkinLightmapRenderer.Mesh()\n m.ExtractOrigEffect(mesh)\n m.CreateOptixEffects(includeStretchMap=True)\n PD.AddWeakBlue(self, 'oxMeshes', mesh, m)\n fx = PD.GetEffectsFromMesh(mesh)\n for f in fx:\n self.scatterFX.add(f)\n\n self.oxWorldPosMapUV = PD.SkinLightmapRenderer.CreateRenderTarget(self.unwrapSize, self.unwrapSize, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT, useRT=True)\n self.oxWorldNormalMapUV = PD.SkinLightmapRenderer.CreateRenderTarget(self.unwrapSize, self.unwrapSize, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT, useRT=True)\n self.stretchMap = PD.SkinLightmapRenderer.CreateRenderTarget(self.unwrapSize / 2, self.unwrapSize / 2, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT, useRT=True)\n rj = trinity.CreateRenderJob('Optix UV Unwrap')\n rj.PushRenderTarget(self.oxWorldPosMapUV)\n rj.PushDepthStencil(self.oxDepth)\n rj.Clear((0, 0, 0, 0), 1.0)\n rj.SetStdRndStates(trinity.RM_FULLSCREEN)\n vp = trinity.TriViewport()\n vp.x = 0\n vp.y = 0\n vp.width = self.unwrapSize\n vp.height = self.unwrapSize\n rj.SetViewport(vp)\n PD.SkinLightmapRenderer.AddCallback(self, FullOptixRenderer.OnBeforeOptixPositionsUV, 'onBeforeOptixPositionsUV', rj)\n rj.RenderScene(self.scene).name = 'Optix WorldPos (UV space)'\n PD.SkinLightmapRenderer.AddCallback(self, lambda weakSelf: PD.SkinLightmapRenderer.DoChangeEffect('oxNormalWorldUVEffect', meshes=weakSelf.oxMeshes), '', rj)\n rj.SetRenderTarget(self.oxWorldNormalMapUV)\n rj.Clear((0, 0, 0, 0), 1.0)\n rj.RenderScene(self.scene).name = 'Optix Normals (UV space)'\n rj.SetRenderTarget(self.stretchMap)\n rj.Clear((0, 0, 0, 0), 
1.0)\n vp2 = trinity.TriViewport()\n vp2.x = 0\n vp2.y = 0\n vp2.width = self.unwrapSize / 2\n vp2.height = self.unwrapSize / 2\n rj.SetViewport(vp2)\n PD.SkinLightmapRenderer.AddCallback(self, lambda weakSelf: PD.SkinLightmapRenderer.DoChangeEffect('stretchmapRenderEffect', meshes=weakSelf.oxMeshes), '', rj)\n rj.RenderScene(self.scene).name = 'Stretchmap'\n PD.SkinLightmapRenderer.AddCallback(self, FullOptixRenderer.OnAfterOptix, 'onAfterOptix', rj)\n rj.PopRenderTarget()\n rj.PopDepthStencil()\n rj.ScheduleOnce()\n rj.WaitForFinish()\n if False:\n PD.SkinLightmapRenderer.SaveTarget(self.oxWorldPosMapUV, 'c:/depot/oxworldposuv2.dds', isRT=True)\n PD.SkinLightmapRenderer.SaveTarget(self.oxWorldNormalMapUV, 'c:/depot/oxworldnormaluv2.dds', isRT=True)\n PD.SkinLightmapRenderer.SaveTarget(self.stretchMap, 'c:/depot/stretchmap2.dds', isRT=True)\n print '** MAPS SAVED **'\n\n def RunSkinningAndTesselation(self):\n print '*** Tesselation phase ***'\n batchTypes = self.skinnedOptix[0]\n optix = self.optix\n ptx = {}\n ptx[72] = self.path + 'eve_skinning_kernel72.ptx'\n ptx[64] = self.path + 'eve_skinning_kernel64.ptx'\n for bytes, ptxfile in ptx.iteritems():\n LogInfo('Processing ', bytes, 'bytes/vertex')\n skinningProgram = trinity.Tr2OptixProgram(ptxfile, 'kernel_no_tesselation')\n skinningProgramTesselate = trinity.Tr2OptixProgram(ptxfile, 'kernel_tesselation')\n optix.SetEntryPointCount(2)\n optix.SetRayGenerationProgram(0, skinningProgram)\n optix.SetRayGenerationProgram(1, skinningProgramTesselate)\n for batchType in range(len(batchTypes)):\n batches = batchTypes[batchType]\n out = []\n\n def needsTesselation(fx):\n return 'skinnedavatarhair_detailed.fx' in fx.effectFilePath.lower()\n\n for batch in batches:\n if 'furshell' in batch[1].effectFilePath.lower():\n out.append(None)\n continue\n tesselate = needsTesselation(batch[1])\n triangle_count = batch[6]\n bytes_per_vertex = batch[8]\n if bytes_per_vertex != bytes:\n out.append(None)\n continue\n 
vertex_buffer_output = trinity.Tr2OptixBuffer()\n vertex_buffer_output.CreateUserData(bytes_per_vertex, triangle_count * 3 * 4 if tesselate else triangle_count * 3, trinity.OPTIX_BUFFER_INPUT_OUTPUT, True)\n out.append(vertex_buffer_output)\n\n for i, batch in enumerate(batches):\n if 'furshell' in batch[1].effectFilePath.lower():\n continue\n triangle_count = batch[6]\n tesselate = needsTesselation(batch[1])\n bytes_per_vertex = batch[8]\n if bytes_per_vertex != bytes:\n continue\n if tesselate:\n LogInfo('Tesselating geometry ', batch, ' of type ', batchType)\n else:\n LogInfo('Skinning geometry ', batch, ' of type ', batchType)\n optix.SetBuffer('vertex_buffer', batch[2])\n optix.SetBuffer('index_buffer', batch[3])\n optix.SetBuffer('vertex_buffer_output', out[i])\n optix.SetUInt('first_index_index', batch[5])\n optix.SetBuffer('matrix_buffer', batch[7])\n program = int(tesselate)\n optix.Run(program, triangle_count, 1)\n batch[0].SetBuffer('vertex_buffer', out[i])\n if tesselate:\n batch[0].SetPrimitiveCount(triangle_count * 4)\n\n optix.SetRayGenerationProgram(0, self.raygen)\n optix.SetRayGenerationProgram(1, self.raygen)\n\n def RemoveBadGeometry(self, model):\n self.haveBeard = False\n self.beardFx = None\n for mesh in model.visualModel.meshes:\n for area in mesh.decalAreas:\n if PD.IsBeard(area):\n self.haveBeard = True\n self.beardFx = area.effect\n area.debugIsHidden = True\n break\n\n for mesh in model.visualModel.meshes:\n for area in mesh.transparentAreas:\n lname = area.name.lower()\n if lname.startswith('eyeshadow_'):\n mesh.transparentAreas.removeAt(-1)\n break\n\n if False:\n for mesh in model.visualModel.meshes:\n for area in mesh.opaqueAreas:\n lname = area.name.lower()\n if 'eye' not in lname or 'eyewet' in lname or 'eyelash' in lname:\n mesh.opaqueAreas.removeAt(-1)\n break\n\n for area in mesh.transparentAreas:\n lname = area.name.lower()\n if 'eye' not in lname or 'eyewet' in lname or 'eyelash' in lname:\n 
mesh.transparentAreas.removeAt(-1)\n break\n\n if False:\n print 'raytracing', len(model.visualModel.meshes), 'meshes'\n for mesh in model.visualModel.meshes:\n lname = mesh.name.lower()\n if not lname.startswith('hair'):\n print 'removing', lname\n mesh.opaqueAreas.removeAt(-1)\n mesh.decalAreas.removeAt(-1)\n mesh.transparentAreas.removeAt(-1)\n elif False:\n print 'removing', lname\n for a in mesh.opaqueAreas:\n print 'opaque', a.name\n\n for a in mesh.decalAreas:\n print 'decal', a.name\n\n for a in mesh.transparentAreas:\n print 'transp', a.name\n\n mesh.opaqueAreas.removeAt(-1)\n mesh.decalAreas.removeAt(-1)\n mesh.transparentAreas.removeAt(-1)\n else:\n print 'keeping', lname\n\n def TransferBeardParameters(self, optix):\n if self.haveBeard:\n LogInfo('Beard found')\n beardLength = self.settings['beardLength']\n optix.SetFloat3('beardOptions', beardLength[0], beardLength[1], self.settings['beardGravity'])\n floatMap = {'FurLength': 'beard_fur_length',\n 'UVScale': 'beard_uv_scale',\n 'AlphaMultiplier': 'beard_alpha_multiplier',\n 'CombStrength': 'beard_comb_strength',\n 'FurGrainRotation': 'beard_fur_grain_rotation',\n 'MirrorGrain': 'beard_mirror_grain',\n 'FurParallax': 'beard_fur_parallax'}\n float3Map = {'gravityOffset': 'beard_gravity_offset',\n 'MaterialDiffuseColor': 'beard_diffuse_color'}\n for param in self.beardFx.parameters:\n optixName = floatMap.get(param.name, None)\n if optixName is not None:\n optix.SetFloat(optixName, param.value)\n else:\n optixName = float3Map.get(param.name, None)\n if optixName is not None:\n optix.SetFloat3(optixName, param.value[0], param.value[1], param.value[2])\n\n def GenerateBeardGeometry(self, optix, path, any_hit_shadow):\n if not self.haveBeard:\n return None\n LogInfo('generating beard splines')\n SkinRaytracingTools.SetOptixMatrixFromTrinity(optix, 'clipToWorld')\n beardProgram = trinity.Tr2OptixProgram(path + 'eve_beard_kernel.ptx', 'kernel')\n curveOutputBuffer = trinity.Tr2OptixBuffer()\n curveCount = 
512\n curveOutputBuffer.CreateUserData(80, curveCount * curveCount, trinity.OPTIX_BUFFER_INPUT_OUTPUT, True)\n optix.SetBuffer('output', curveOutputBuffer)\n rayTypeCount = optix.GetRayTypeCount()\n optix.SetRayTypeCount(1)\n optix.SetEntryPointCount(2)\n optix.SetRayGenerationProgram(0, beardProgram)\n optix.SetRayGenerationProgram(1, beardProgram)\n optix.SetEntryPointCount(1)\n LogInfo('beard: about to Run')\n optix.Run(0, curveCount, curveCount)\n LogInfo('beard: Run done')\n optix.SetRayTypeCount(rayTypeCount)\n hairGeometry = trinity.Tr2OptixGeometry()\n hairGeometry.InitializeFromProgram(path + 'bezier_curves.ptx', 'intersect', 'bounds')\n subdivideDepth = 2\n hairGeometry.SetPrimitiveCount(curveCount * curveCount * (1 << subdivideDepth))\n optix.SetUInt('presubdivide_depth', subdivideDepth)\n optix.SetBuffer('curves', curveOutputBuffer)\n LogInfo('beard: geometry setup done')\n beardInstance = trinity.Tr2OptixGeometryInstance()\n beardInstance.SetGeometry(hairGeometry)\n closest_hit_BeardShader = trinity.Tr2OptixProgram(path + 'eve_beard_shader.ptx', 'closest_hit_BeardShader')\n beardMaterial = trinity.Tr2OptixMaterial()\n beardMaterial.SetClosestHit(0, closest_hit_BeardShader)\n beardMaterial.SetAnyHit(1, any_hit_shadow)\n beardInstance.SetMaterial(beardMaterial)\n LogInfo('beard: geometry instance setup done')\n return beardInstance\n\n def _DoInit(self, scene = None):\n model = None\n if scene is None:\n scene = PD.SkinLightmapRenderer.Scene()\n self.scene = scene\n self.previousVP = trinity.TriMatrix()\n self.framecount = 1\n self.useOIT = True\n if scene is None:\n LogWarn('No scene!')\n return\n for dynamic in scene.dynamics:\n if dynamic.__typename__ == 'Tr2IntSkinnedObject':\n model = dynamic\n break\n else:\n LogWarn('No Tr2IntSkinnedObject found')\n return\n\n if model is None:\n LogWarn('No Tr2IntSkinnedObject found')\n return\n self.skinnedObject = model\n if self.skinnedObject.visualModel is None:\n LogWarn('skinnedObject has no 
visualMeshes')\n return\n bg = trinity.renderContext.GetDefaultBackBuffer()\n step = trinity.renderJobs.FindStepByName('SET_SWAPCHAIN_RT')\n if step is not None:\n bg = step.renderTarget\n self.width = self.settings.get('outputWidth', bg.width)\n self.height = self.settings.get('outputHeight', bg.height)\n self.blitfx = FullScreenBlitter(self.width, self.height)\n self.RemoveBadGeometry(model)\n outputTexture = trinity.TriTextureRes(self.width, self.height, 1, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT)\n self.outputTexture = outputTexture\n self.capture = CaptureHelper(self.width, self.height)\n self._InitUVUnwrap()\n for steps in trinity.renderJobs.recurring:\n if steps.name == 'FullOptixRenderer':\n steps.UnscheduleRecurring()\n\n start = time.clock()\n optix = trinity.Tr2Optix()\n self.optix = optix\n optix.SetInteropDevice()\n optix.SetRayTypeCount(4)\n optix.SetEntryPointCount(1)\n if False:\n optix.EnableAllExceptions()\n optix.SetPrintEnabled(True)\n optix.SetPrintBufferSize(16384)\n optix.SetUInt('radiance_ray_type', 0)\n optix.SetUInt('shadow_ray_type', 1)\n optix.SetUInt('translucency_ray_type', 2)\n optix.SetUInt('translucency_ray_type', 3)\n optix.SetFloat('scene_epsilon', 0.001)\n optix.SetUInt('frameIteration', 0)\n self.outputBuffer = trinity.Tr2OptixBuffer()\n self.outputBuffer.CreateFloat4(self.width, self.height, trinity.OPTIX_BUFFER_INPUT_OUTPUT)\n optix.SetBuffer('output_buffer', self.outputBuffer)\n self.ApplySettings()\n path = str(blue.paths.ResolvePath('res:/graphics/effect/optix/NCC/'))\n self.path = path\n LogInfo('Getting files from', path)\n everything = []\n any_hit_shadow = trinity.Tr2OptixProgram(path + 'eve_shadow.ptx', 'any_hit_shadow')\n any_hit_shadow_blend = trinity.Tr2OptixProgram(path + 'eve_shadow.ptx', 'any_hit_shadow_blend')\n shader_diffuse_only_feeler = trinity.Tr2OptixProgram(path + 'eve_bounce.ptx', 'closest_hit_DiffuseOnlyFeeler2')\n any_hit_cutout = trinity.Tr2OptixProgram(path + 'eve_cutout.ptx', 'any_hit_CutoutMask')\n 
any_hit_diffuse_feeler_blend = trinity.Tr2OptixProgram(path + 'eve_shadow.ptx', 'any_hit_diffuse_feeler_blend')\n everything.append(any_hit_shadow)\n everything.append(any_hit_shadow_blend)\n everything.append(shader_diffuse_only_feeler)\n everything.append(any_hit_cutout)\n mainRay = 0\n shadowRay = 1\n bounceRay = 3\n\n def MakeMaterialWithShader(shader):\n material = trinity.Tr2OptixMaterial()\n material.SetClosestHit(mainRay, shader)\n material.SetAnyHit(shadowRay, any_hit_shadow)\n material.SetClosestHit(bounceRay, shader_diffuse_only_feeler)\n everything.append(material)\n return (material, shader)\n\n def MakeMaterial(ptxFile, shaderName):\n shader = trinity.Tr2OptixProgram(path + ptxFile + '.ptx', shaderName)\n everything.append(shader)\n return MakeMaterialWithShader(shader)\n\n def MakeDecal(material):\n material.SetAnyHit(mainRay, any_hit_cutout)\n material.SetAnyHit(shadowRay, any_hit_shadow_blend)\n material.SetAnyHit(bounceRay, any_hit_cutout)\n\n skin_single_material, skin_single_shade = MakeMaterial('eve_skin', 'closest_hit_ShadeSinglePassSkin_Single2')\n skin_single_material_scatter = MakeMaterial('eve_skin', 'closest_hit_ShadeSinglePassSkin_Single_Scatter2')[0]\n skin_single_material_decal = MakeMaterialWithShader(skin_single_shade)[0]\n MakeDecal(skin_single_material_decal)\n glasses_shade = trinity.Tr2OptixProgram(path + 'eve_glasses.ptx', 'glasses_shade')\n glasses_shadow = trinity.Tr2OptixProgram(path + 'eve_glasses.ptx', 'glasses_shadow')\n glass_material = trinity.Tr2OptixMaterial()\n glass_material.SetAnyHit(mainRay, glasses_shade)\n glass_material.SetAnyHit(shadowRay, glasses_shadow)\n glass_material.SetClosestHit(bounceRay, shader_diffuse_only_feeler)\n everything.append(glasses_shade)\n everything.append(glasses_shadow)\n vizNames = ['closest_hit_VizNormal',\n 'closest_hit_VizUV',\n 'closest_hit_VizConstantColor',\n 'closest_hit_VizDiffuse']\n vizualizer, vizualizer_shade = MakeMaterial('eve_basic', vizNames[0])\n vizualizer_decal = 
MakeMaterialWithShader(vizualizer_shade)[0]\n MakeDecal(vizualizer_decal)\n skin_double_material, skin_double_shade = MakeMaterial('eve_skin', 'closest_hit_ShadeSinglePassSkin_Double2')\n skin_double_material_decal = MakeMaterialWithShader(skin_double_shade)[0]\n MakeDecal(skin_double_material_decal)\n skin_double_material_transparent = MakeMaterial('eve_skin', 'closest_hit_ShadeSinglePassSkin_Double2_Blend')[0]\n skin_double_material_transparent.SetAnyHit(mainRay, any_hit_cutout)\n skin_double_material_transparent.SetAnyHit(shadowRay, any_hit_shadow_blend)\n skin_double_material_transparent.SetAnyHit(bounceRay, any_hit_cutout)\n avatar_brdf_material, avatar_brdf_shade = MakeMaterial('eve_brdf', 'closest_hit_ShadeAvatarBRDF_Single2')\n avatar_brdf_material_decal = MakeMaterialWithShader(avatar_brdf_shade)[0]\n MakeDecal(avatar_brdf_material_decal)\n avatar_brdf_double_material, avatar_brdf_double_shade = MakeMaterial('eve_brdf', 'closest_hit_ShadeAvatarBRDF_Double2')\n avatar_brdf_double_material_decal = MakeMaterialWithShader(avatar_brdf_double_shade)[0]\n MakeDecal(avatar_brdf_double_material_decal)\n avatar_hair_material = trinity.Tr2OptixMaterial()\n avatar_hair_shade = trinity.Tr2OptixProgram(path + 'eve_hair.ptx', 'closest_hit_ShadeAvatarHair2' if self.useOIT else 'closest_hit_ShadeAvatarHair2_Blend')\n avatar_hair_material.SetClosestHit(mainRay, avatar_hair_shade)\n if self.useOIT:\n avatar_hair_oit = trinity.Tr2OptixProgram(path + 'eve_hair.ptx', 'any_hit_HairOIT')\n avatar_hair_material.SetAnyHit(mainRay, avatar_hair_oit)\n avatar_hair_material.SetAnyHit(shadowRay, any_hit_shadow_blend)\n avatar_hair_material.SetClosestHit(bounceRay, shader_diffuse_only_feeler)\n everything.append(avatar_hair_shade)\n everything.append(avatar_hair_material)\n avatar_hair_material_decal = trinity.Tr2OptixMaterial()\n avatar_hair_material_decal.SetClosestHit(mainRay, avatar_hair_shade)\n avatar_hair_material_decal.SetAnyHit(mainRay, avatar_hair_oit if self.useOIT else 
any_hit_cutout)\n avatar_hair_material_decal.SetAnyHit(shadowRay, any_hit_shadow_blend)\n avatar_hair_material_decal.SetClosestHit(bounceRay, shader_diffuse_only_feeler)\n avatar_hair_material_decal.SetAnyHit(bounceRay, any_hit_cutout)\n everything.append(avatar_hair_material_decal)\n eye_shade = trinity.Tr2OptixProgram(path + 'eve_eyes.ptx', 'closest_hit_ShadeEye')\n eye_material = trinity.Tr2OptixMaterial()\n eye_material.SetClosestHit(mainRay, eye_shade)\n eye_material.SetAnyHit(shadowRay, any_hit_shadow)\n everything.append(eye_shade)\n everything.append(eye_material)\n eye_wetness_shade = trinity.Tr2OptixProgram(path + 'eve_eyes.ptx', 'closest_hit_ShadeEyeWetness')\n eye_wetness_material = trinity.Tr2OptixMaterial()\n eye_wetness_material.SetClosestHit(mainRay, eye_wetness_shade)\n eye_wetness_material.SetAnyHit(shadowRay, any_hit_shadow)\n everything.append(eye_wetness_shade)\n everything.append(eye_wetness_material)\n portrait_basic_material, portrait_basic_shade = MakeMaterial('eve_basic', 'closest_hit_ShadePortraitBasic')\n portrait_basic_material_decal = MakeMaterialWithShader(portrait_basic_shade)[0]\n MakeDecal(portrait_basic_material_decal)\n LogInfo('global setup OK', time.clock() - start, 'seconds')\n\n def MakeSamplerFromMap(texture, name):\n sampler = trinity.Tr2OptixTextureSampler()\n sampler.CreateFromSurface(texture)\n sampler.SetNormalizedIndexingMode(True)\n optix.SetSampler(name, sampler)\n LogInfo('No-Copy Interop for ', name)\n everything.append(sampler)\n\n MakeSamplerFromMap(self.oxWorldPosMapUV, 'world_pos_uv_buffer')\n MakeSamplerFromMap(self.oxWorldNormalMapUV, 'world_normal_uv_buffer')\n MakeSamplerFromMap(self.stretchMap, 'stretchmap_buffer')\n useHdrProbe = False\n if useHdrProbe:\n optix.SetSamplerFromProbe('hdr_probe_sampler', 'c:/depot/optix/data/Japan_subway2_FINAL.hdr')\n start = time.clock()\n self.skinnedOptix = optix.CreateFromSkinnedModel(model, 72, path + 'triangle72.ptx', 'mesh_intersect', 'mesh_bounds', 64, path + 
'triangle64.ptx', 'mesh_intersect', 'mesh_bounds')\n optixBatches = self.skinnedOptix[0]\n self.TransferBeardParameters(optix)\n group = trinity.Tr2OptixGeometryGroup()\n groupChildren = []\n self.rayCounter = RayCountHelper(self.optix)\n self.oit = OitHelper(self.optix)\n self.raygen = trinity.Tr2OptixProgram(path + 'raygen.ptx', 'ray_request')\n self.RunSkinningAndTesselation()\n start = time.clock()\n samplers = SkinRaytracingTools.InteropAllTextures(optix, model, waitForFinish=True)\n everything.append(samplers)\n backdrop = trinity.TriTexture2DParameter()\n backdrop.resourcePath = self.settings['backgroundBitmap']\n skinmap = trinity.TriTexture2DParameter()\n skinmap.resourcePath = 'res:/Graphics/Character/female/paperdoll/head/head_generic/SkinMap.png'\n blue.resMan.Wait()\n everything.append(SkinRaytracingTools.InteropTexture('BackgroundEnvMap', backdrop.resource, waitForFinish=True, scope=optix))\n everything.append(SkinRaytracingTools.InteropTexture('SkinMap', skinmap.resource, waitForFinish=True, scope=optix))\n LogInfo('texture interop OK', time.clock() - start, 'seconds')\n splines = self.GenerateBeardGeometry(optix, path, any_hit_shadow)\n if splines is not None:\n groupChildren.append(splines)\n print '*** Raytracing phase ***'\n\n def SetAlphaRef(instance, batchType):\n if batchType == 1:\n instance.SetFloat4('alphaRef', 0.75, 0, 0, 0)\n elif batchType == 2:\n instance.SetFloat4('alphaRef', 0.01, 0, 0, 0)\n\n haveGlasses = False\n for batchType in range(len(optixBatches)):\n isOpaque = batchType == 0\n batches = optixBatches[batchType]\n for batch in batches:\n if 'furshell' in batch[1].effectFilePath.lower():\n continue\n instance = trinity.Tr2OptixGeometryInstance()\n everything.append(instance)\n instance.SetGeometry(batch[0])\n r = random.random()\n g = random.random()\n b = random.random()\n instance.SetFloat4('viz_constant_color', r, g, b, 1.0)\n fxpath = batch[1].effectFilePath.lower()\n if False:\n instance.SetMaterial(vizualizer if isOpaque 
else vizualizer_decal)\n elif 'glassshader' in fxpath:\n instance.SetMaterial(glass_material)\n if not haveGlasses:\n haveGlasses = True\n elif 'skinnedavatarbrdfsinglepassskin_single.fx' in fxpath:\n if batch[1] in self.scatterFX:\n instance.SetMaterial(skin_single_material_scatter)\n else:\n instance.SetMaterial(skin_single_material if isOpaque else skin_single_material_decal)\n SetAlphaRef(instance, batchType)\n elif 'skinnedavatarbrdfsinglepassskin_double.fx' in fxpath:\n instance.SetMaterial([skin_double_material, skin_double_material_decal, skin_double_material_transparent][batchType])\n SetAlphaRef(instance, batchType)\n elif 'skinnedavatarbrdflinear.fx' in fxpath:\n instance.SetMaterial(avatar_brdf_material if isOpaque else avatar_brdf_material_decal)\n elif 'skinnedavatarbrdfdoublelinear.fx' in fxpath:\n instance.SetMaterial(avatar_brdf_double_material if isOpaque else avatar_brdf_double_material_decal)\n elif 'skinnedavatarhair_detailed.fx' in fxpath:\n instance.SetMaterial(avatar_hair_material if isOpaque else avatar_hair_material_decal)\n instance.SetFloat4('alphaRef', 0.01, 0, 0, 0)\n instance.SetUInt('enableCulling', 0)\n elif 'eyeshader.fx' in fxpath:\n instance.SetMaterial(eye_material)\n elif 'eyewetnessshader.fx' in fxpath:\n instance.SetMaterial(eye_wetness_material)\n elif 'portraitbasic.fx' in fxpath:\n instance.SetMaterial(portrait_basic_material if isOpaque else portrait_basic_material_decal)\n else:\n instance.SetMaterial(vizualizer if isOpaque else vizualizer_decal)\n SkinRaytracingTools.CopyParametersToContext(batch[1], instance)\n groupChildren.append(instance)\n\n group.SetChildCount(len(groupChildren))\n for x in xrange(len(groupChildren)):\n group.SetChild(x, groupChildren[x])\n\n everything.append(group)\n group.SetAcceleration('Bvh', 'Bvh')\n LogInfo('scene interop OK', time.clock() - start, 'seconds')\n start = time.clock()\n bufEveLights = SkinRaytracingTools.CreateBufferForLights(scene.lights, useHdrProbe)\n 
optix.SetBuffer('trinity_lights', bufEveLights)\n LogInfo('lights interop OK', time.clock() - start, 'seconds')\n start = time.clock()\n optix.SetGeometryGroup('top_scene', group)\n optix.SetGeometryGroup('shadow_casters', group)\n optix.SetRayGenerationProgram(0, self.raygen)\n optix.SetEntryPointCount(1)\n miss = None\n if not useHdrProbe:\n miss = trinity.Tr2OptixProgram(path + 'eve_miss.ptx', 'miss')\n else:\n miss = trinity.Tr2OptixProgram(path + 'eve_miss_probe.ptx', 'miss')\n optix.SetMissProgram(3, miss)\n optix.SetFloat3('bg_color', 1.0, 0, 0)\n everything.append(miss)\n if False:\n exception = trinity.Tr2OptixProgram(path + 'eve_miss.ptx', 'exception')\n optix.SetExceptionProgram(0, exception)\n everything.append(exception)\n optix.SetStackSize(4096)\n self.everything = everything\n SkinRaytracingTools.SetOptixMatrixFromTrinity(optix, 'clipToWorld', self.width / float(self.height))\n LogInfo('general setup OK', time.clock() - start, 'seconds')\n optix.ReportObjectCounts()\n start = time.clock()\n optix.Compile()\n LogInfo('compile OK', time.clock() - start, 'seconds')\n start = time.clock()\n optix.Validate()\n LogInfo('validate OK', time.clock() - start, 'seconds')\n start = time.clock()\n optix.Run(0, 0, 0)\n LogInfo('BVH OK', time.clock() - start, 'seconds')\n start = time.clock()\n self.blitfx.SetTexture(outputTexture, outputTexture, outputTexture)\n rj = trinity.CreateRenderJob('FullOptixRenderer')\n rj.PushRenderTarget(self.outputRT)\n rj.PushDepthStencil(None)\n self.AddCallback(FullOptixRenderer.RaytraceFrame, 'Raytrace Frame', rj)\n rj.CopyRtToTexture(outputTexture).name = 'cuda -> outputTexture'\n rj.PopDepthStencil()\n rj.PopRenderTarget()\n rj.SetStdRndStates(trinity.RM_FULLSCREEN).name = 'fullscreen quad'\n rj.RenderEffect(self.blitfx.effect).name = ' blit'\n self.capture.CreateRenderSteps(rj, self.blitfx.effect)\n rj.steps.append(trinity.TriStepRenderFps())\n rj.ScheduleRecurring(insertFront=False)\n self.renderJob = rj\n LogInfo('final 
    @staticmethod
    def EnablePaperDollJobs(enable):
        """Toggle the regular paper-doll render jobs on or off.

        Currently a no-op: the whole body is guarded by 'if False' and is kept
        only as a debugging aid for re-enabling the original render path.
        """
        if False:
            for job in trinity.renderJobs.recurring:
                if 'paperdollrenderjob' in job.name.lower():
                    for step in job.steps:
                        step.enabled = enable

            # Slow the device tick back down when the normal jobs run; tick as
            # fast as possible while the raytracer owns the frame.
            if enable:
                trinity.device.tickInterval = 10
            else:
                trinity.device.tickInterval = 0

    def ApplySettings(self):
        """Push the current self.settings values into the OptiX context.

        Also resets previousVP so the next RaytraceFrame treats the camera as
        changed and restarts progressive accumulation.
        """
        self.optix.SetFloat('light_size', self.settings['light_size'])
        self.optix.SetFloat3('depthOfField', 1.0, self.settings['lens_radius'], 0)
        self.optix.SetFloat('HairShadows', self.settings['HairShadows'])
        # Divide by pi: presumably converts intensity to radiance for the
        # environment lookup -- TODO confirm against the .ptx shader.
        self.optix.SetFloat('EnvMapBoost', self.settings['EnvMapBoost'] / 3.1415927)
        self.previousVP.Identity()

    def SetLensRadius(self, lens_radius):
        # Aperture radius for depth of field; applied immediately.
        self.settings['lens_radius'] = lens_radius
        self.ApplySettings()

    def SetLensFocalDistance(self, lens_focal_distance):
        # A non-positive value removes the override so RaytraceFrame falls
        # back to its automatic focal distance.
        if lens_focal_distance <= 0:
            self.settings.pop('lens_focal_distance', 0)
        else:
            self.settings['lens_focal_distance'] = lens_focal_distance
        self.ApplySettings()

    def SetLightSize(self, light_size):
        # Area-light radius used for soft shadows.
        self.settings['light_size'] = light_size
        self.ApplySettings()

    def SetHairShadowsEnabled(self, enabled):
        # Stored as float because it is uploaded as an OptiX float variable.
        self.settings['HairShadows'] = float(enabled)
        self.ApplySettings()

    def SetBackgroundIntensity(self, intensity):
        self.settings['EnvMapBoost'] = intensity
        self.ApplySettings()

    def __init__(self, scene = None, backgroundBitmap = None, memento = None, beardLength = (0.01, 0.01), beardGravity = 0.0005, outputWidth = None, outputHeight = None, asyncSetup = True, listenForUpdate = True):
        """Create the renderer and kick off scene conversion.

        memento: a settings dict from a previous instance (see GetMemento);
        when given, all other tuning arguments are ignored.
        asyncSetup: run _DoInit on a uthread tasklet instead of blocking.
        listenForUpdate: allow NotifyUpdate to restart this instance.
        """
        LogInfo('init', self)
        # Flush trinity's resource caches so the converted scene is rebuilt
        # from fresh data.
        blue.motherLode.maxMemUsage = 0
        blue.resMan.ClearAllCachedObjects()
        self.framecount = 0
        self.listenForUpdate = listenForUpdate
        if memento is not None:
            self.settings = memento
        else:
            self.settings = {}
            self.settings['light_size'] = 0.125
            self.settings['lens_radius'] = 0.001
            self.settings['HairShadows'] = 1.0
            self.settings['EnvMapBoost'] = 1.0
            self.settings['backgroundBitmap'] = backgroundBitmap if backgroundBitmap is not None else 'res:/texture/global/red_blue_ramp.dds'
            self.settings['beardLength'] = beardLength
            self.settings['beardGravity'] = beardGravity
            if outputWidth is not None:
                self.settings['outputWidth'] = outputWidth
            if outputHeight is not None:
                self.settings['outputHeight'] = outputHeight
        if asyncSetup:
            uthread.new(self._DoInit, scene=scene)
        else:
            self._DoInit(scene=scene)

    def GetMemento(self):
        """Return the settings dict; passing it to __init__ recreates this setup."""
        return self.settings

    def __del__(self):
        """Tear down the render job and release OptiX objects.

        NOTE(review): indentation reconstructed -- the cleanup appears to run
        only when _DoInit got far enough to create renderJob; confirm nesting.
        """
        LogInfo('deleting', self)
        if hasattr(self, 'renderJob'):
            self.renderJob.UnscheduleRecurring()
            self.renderJob = None
            del self.raygen
            del self.rayCounter
            del self.oit
            del self.outputBuffer
            del self.skinnedOptix
            del self.everything
            LogInfo('Post-cleanup leak check:')
            self.optix.ReportObjectCounts()
            self.EnablePaperDollJobs(True)

    @staticmethod
    def Pause():
        # Stop the recurring raytrace job without destroying the instance.
        if FullOptixRenderer.instance is not None:
            FullOptixRenderer.instance.renderJob.UnscheduleRecurring()

    @staticmethod
    def NotifyUpdate():
        """Restart the singleton with its saved settings after a scene change."""
        if FullOptixRenderer.instance is not None and FullOptixRenderer.instance.listenForUpdate:
            LogInfo('NotifyUpdate, restarting', FullOptixRenderer.instance)
            memento = FullOptixRenderer.instance.GetMemento()
            # Drop the old instance first so its __del__ releases GPU state
            # before the replacement is built.
            FullOptixRenderer.instance = None
            FullOptixRenderer.instance = FullOptixRenderer(memento=memento)


class ShipOptixRenderer():
    """OptiX path tracer for ship/station scenes (counterpart of FullOptixRenderer)."""
    __guid__ = 'paperDoll.ShipOptixRenderer'
    instance = None

    def AddCallback(self, func, name, rj):
        # Append a python-callback step to render job rj; the weakref keeps
        # the job from pinning this renderer alive.
        cb = trinity.TriStepPythonCB()
        weakSelf = weakref.ref(self)
        cb.SetCallback(lambda : SkinRaytracingTools.FuncWrapper(weakSelf, func))
        cb.name = name
        rj.steps.append(cb)

    def GetFrameCount(self):
        # Number of accumulated progressive-refinement frames.
        return self.framecount

    def SaveScreenshot(self, filename):
        self.capture.SaveSurfaceToFile(filename)

    def AddRenderPreviewStep(self, renderJob):
        # Blit the raytraced texture into an externally owned render job.
        renderJob.SetStdRndStates(trinity.RM_FULLSCREEN).name = ' [optix] fullscreen quad'
        renderJob.PushDepthStencil(None).name = ' [optix] push depth'
        renderJob.RenderEffect(self.blitfx.effect).name = ' [optix] Blit to screenshot'
        renderJob.PopDepthStencil().name = ' [optix] pop depth'
quad'\n renderJob.PushDepthStencil(None).name = ' [optix] push depth'\n renderJob.RenderEffect(self.blitfx.effect).name = ' [optix] Blit to screenshot'\n renderJob.PopDepthStencil().name = ' [optix] pop depth'\n\n @staticmethod\n def RaytraceFrame(selfRef):\n start = time.time()\n VP = SkinRaytracingTools.SetOptixMatrixFromTrinity(selfRef.optix, 'clipToWorld', selfRef.width / float(selfRef.height))\n if not SkinRaytracingTools.matEqual(VP, selfRef.previousVP):\n selfRef.previousVP = VP\n selfRef.outputBuffer.Clear()\n selfRef.framecount = 0\n pos1 = (0, 0, 0)\n pos2 = pos1\n dist1 = geo2.Vec3Distance(pos1, trinity.GetViewPosition())\n dist2 = geo2.Vec3Distance(pos2, trinity.GetViewPosition())\n autodof = min(dist1, dist2)\n dof = selfRef.settings.get('lens_focal_distance', autodof)\n LogInfo('Auto-depth-of-field is at', autodof, ', actual focal distance is', dof)\n selfRef.optix.SetFloat3('depthOfField', dof - trinity.GetFrontClip(), selfRef.settings['lens_radius'], 0)\n else:\n selfRef.framecount += 1\n selfRef.optix.SetUInt('frameIteration', selfRef.framecount)\n selfRef.oit.ResetAllocationCount()\n selfRef.rayCounter.ResetCount()\n time1 = time.time()\n selfRef.optix.Run(0, selfRef.width, selfRef.height)\n time2 = time.time()\n traceTime = time2 - time1\n raycount = selfRef.rayCounter.GetCount()\n raysec = 0\n if traceTime > 0:\n raysec = raycount / float(traceTime)\n time3 = time.time()\n if selfRef.framecount % 32 == 0:\n oit = selfRef.oit.GetAllocationCount()\n if oit > 0:\n print oit, 'oit allocations'\n selfRef.blitfx.UpdateFrameCount(selfRef.framecount)\n selfRef.outputBuffer.CopyToTexture(selfRef.outputTexture)\n stop = time.time()\n message = 'time: call %05.3f / trace %05.3f / read %05.3f ms' % (float(time1 - start) * 1000, float(time2 - time1) * 1000, float(stop - time3) * 1000)\n message += '// traced %d rays in %05.3f ms / %10d Krays/sec / %d frames' % (raycount,\n traceTime * 1000,\n raysec / 1000,\n selfRef.framecount)\n LogInfo(message)\n\n def 
    def ConvertCubeMapToSH(self, optix, ptxPath, cubeResPath):
        """Project a nebula cube map into 9 spherical-harmonic coefficients.

        Runs the 'cubemapsh.ptx' kernel over the cube faces and leaves the
        result in self.shBuffer (bound as 'sh_buffer' in the OptiX context).
        """
        self.shBuffer = trinity.Tr2OptixBuffer()
        # 9 float4 entries: one per SH basis function up to band 2.
        self.shBuffer.CreateFloat4(9, 1, trinity.OPTIX_BUFFER_INPUT_OUTPUT)
        optix.SetBuffer('sh_buffer', self.shBuffer)
        self.shBuffer.Clear()
        program = trinity.Tr2OptixProgram(ptxPath + 'cubemapsh.ptx', 'kernel')
        optix.SetRayGenerationProgram(0, program)
        optix.ReportObjectCounts()
        cube = trinity.TriTextureCubeParameter()
        cube.resourcePath = cubeResPath
        cube.name = 'Nebula'
        # Block until the cube resource is loaded before interop.
        blue.resMan.Wait()
        mipmaps, names = SkinRaytracingTools.ConvertCubeToTextures(cube)
        for i in range(len(names)):
            if i < len(mipmaps):
                sampler = trinity.Tr2OptixTextureSampler()
                sampler.CreateFromTexture(mipmaps[i])
                sampler.SetNormalizedIndexingMode(True)
                # Samplers are named e.g. 'Nebula<face>' for the kernel.
                optix.SetSampler(cube.name + names[i], sampler)
                LogInfo('No-Copy Cube Side Interop for ' + cube.name + names[i])

        # One launch over a face-sized grid accumulates the projection.
        optix.Run(0, cube.resource.width, cube.resource.width)
        if False:
            # Debug dump of the 9 RGB coefficient triples.
            names = ['Y00',
             'Y1m1',
             'Y10',
             'Y11',
             'Y2m2',
             'Y2m1',
             'Y20',
             'Y21',
             'Y22']
            self.shBuffer.Map()
            ofs = 0
            for name in names:
                print name, ': (',
                print self.shBuffer.GetUserDataF(ofs), ',',
                ofs = ofs + 4
                print self.shBuffer.GetUserDataF(ofs), ',',
                ofs = ofs + 4
                print self.shBuffer.GetUserDataF(ofs), ')'
                ofs = ofs + 4

            self.shBuffer.Unmap()

    def CachedCreateMaterial(self, path, effect):
        """Return an OptiX material for a known ship effect name, else None.

        NOTE(review): self.materialCache is read here but never written to,
        so every call builds a fresh material -- the cache looks like it was
        meant to be populated before the final return; confirm intent.
        """
        material = self.materialCache.get(effect, None)
        if material is not None:
            return material
        shader = None
        if effect in ('tripleglowv3', 'doubleglowv3', 'singleglowv3'):
            shader = trinity.Tr2OptixProgram(path + 'v3ship_glow.ptx', 'closest_hit_' + effect)
        elif effect in ('singleheatv3',):
            shader = trinity.Tr2OptixProgram(path + 'v3ship_heat.ptx', 'closest_hit_' + effect)
        elif effect in ('tripleglowoilv3',):
            shader = trinity.Tr2OptixProgram(path + 'v3ship_glow_oil.ptx', 'closest_hit_' + effect)
        elif effect == 'skinned_tripleglowv3':
            # Skinned variant reuses the unskinned glow closest-hit program.
            shader = trinity.Tr2OptixProgram(path + 'v3ship_glow.ptx', 'closest_hit_tripleglowv3')
        if shader is None:
            # Unknown effect: caller falls back to the visualizer material.
            return
        material = trinity.Tr2OptixMaterial()
        material.SetClosestHit(0, shader)
        # NOTE(review): self.any_hit_shadow is None on the live code path in
        # _DoInit (its creation sits under 'if False'); verify SetAnyHit
        # accepts None or that shadows are intentionally disabled.
        material.SetAnyHit(1, self.any_hit_shadow)
        return material

    def _DoInit(self, scene = None):
        """Convert the trinity space scene into an OptiX scene and schedule rendering.

        Builds geometry, materials, lights and the progressive-refinement
        render job.  Call order matters throughout (create -> bind -> compile
        -> validate -> warm-up run); do not reorder casually.
        """
        if scene is None:
            scene = trinity.device.scene
        self.scene = scene
        self.previousVP = trinity.TriMatrix()
        self.framecount = 1
        self.materialCache = {}
        self.useOIT = True
        if scene is None:
            LogWarn('No scene!')
            return
        # Output size defaults to the backbuffer (or swapchain RT) size.
        bg = trinity.renderContext.GetDefaultBackBuffer()
        step = trinity.renderJobs.FindStepByName('SET_SWAPCHAIN_RT')
        if step is not None:
            bg = step.renderTarget
        self.width = self.settings.get('outputWidth', bg.width)
        self.height = self.settings.get('outputHeight', bg.height)
        self.blitfx = FullScreenBlitter(self.width, self.height)
        bloomScale = 4
        if False:
            # Disabled bloom path: quarter-resolution highpass/blur targets.
            self.highpassRT = PD.SkinLightmapRenderer.CreateRenderTarget(self.width / bloomScale, self.height / bloomScale, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT)
            self.filteredRT = PD.SkinLightmapRenderer.CreateRenderTarget(self.width / bloomScale, self.height / bloomScale, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT)
        outputTexture = trinity.TriTextureRes(self.width, self.height, 1, trinity.PIXEL_FORMAT.R32G32B32A32_FLOAT)
        self.outputTexture = outputTexture
        self.capture = CaptureHelper(self.width, self.height)
        # Remove any previous instance of this job before scheduling a new one.
        for steps in trinity.renderJobs.recurring:
            if steps.name == 'ShipOptixRenderer':
                steps.UnscheduleRecurring()

        path = str(blue.paths.ResolvePath('res:/graphics/effect/optix/ship/'))
        self.path = path
        LogInfo('Getting files from', path)
        start = time.clock()
        optix = trinity.Tr2Optix()
        self.optix = optix
        optix.SetInteropDevice()
        optix.SetRayTypeCount(4)
        optix.SetEntryPointCount(1)
        if False:
            optix.EnableAllExceptions()
        if False:
            optix.SetPrintEnabled(True)
            optix.SetPrintBufferSize(16384)
        optix.SetFloat('scene_epsilon', 0.01)
        optix.SetUInt('frameIteration', 0)
        # Environment lighting: project the scene's nebula into SH, or leave
        # a zeroed SH buffer when there is no background effect.
        nebula = PD.FindResourceByName(scene.backgroundEffect, 'NebulaMap') if scene.backgroundEffect is not None else None
        if nebula is not None:
            LogInfo('Converting to SH ', nebula.resourcePath)
            self.ConvertCubeMapToSH(optix, path, nebula.resourcePath)
        else:
            self.shBuffer = trinity.Tr2OptixBuffer()
            self.shBuffer.CreateFloat4(9, 1, trinity.OPTIX_BUFFER_INPUT_OUTPUT)
            optix.SetBuffer('sh_buffer', self.shBuffer)
            self.shBuffer.Clear()
        self.outputBuffer = trinity.Tr2OptixBuffer()
        self.outputBuffer.CreateFloat4(self.width, self.height, trinity.OPTIX_BUFFER_INPUT_OUTPUT)
        optix.SetBuffer('output_buffer', self.outputBuffer)
        self.ApplySettings()
        # 'everything' pins every interop object for the renderer's lifetime.
        everything = []
        mainRay = 0
        shadowRay = 1
        bounceRay = 3

        # NOTE(review): these two inner helpers reference names ('material',
        # 'shader') that are not defined in their scope and are never called
        # in this method -- they look like dead residue copied from
        # FullOptixRenderer._DoInit; calling them would raise NameError.
        def MakeMaterialWithShader(shader):
            return (material, shader)

        def MakeMaterial(ptxFile, shaderName):
            everything.append(shader)
            return MakeMaterialWithShader(shader)

        LogInfo('global setup OK', time.clock() - start, 'seconds')
        useHdrProbe = False
        start = time.clock()
        self.rayCounter = RayCountHelper(self.optix)
        self.oit = OitHelper(self.optix)
        self.raygen = trinity.Tr2OptixProgram(path + 'raygen.ptx', 'ray_request')
        # Fallback material: flat green visualizer for unrecognized effects.
        shader = trinity.Tr2OptixProgram(path + 'vizualizer.ptx', 'closest_hit_VizGreen')
        viz_material = trinity.Tr2OptixMaterial()
        viz_material.SetClosestHit(0, shader)
        everything.append(viz_material)
        if False:
            any_hit_shadow = trinity.Tr2OptixProgram(path + 'shadow.ptx', 'any_hit_shadow')
            viz_material.SetAnyHit(1, any_hit_shadow)
            self.any_hit_shadow = any_hit_shadow
        else:
            # Live path: shadows disabled (see CachedCreateMaterial note).
            self.any_hit_shadow = None
        start = time.clock()
        nameTranslation = {'GlowNormalSpecularMap': 'NormalMap'}

        def GroupByVertexBuffer(optixBatches):
            # Regroup each batch-type list so batches sharing a vertex buffer
            # (batch[2]) land in the same sublist.
            output = []
            for batchType in range(len(optixBatches)):
                batches = optixBatches[batchType]
                vbDict = {}
                for batch in batches:
                    vb = batch[2]
                    list = vbDict.get(vb, None)
                    if list is not None:
                        list.append(batch)
                    else:
                        vbDict[vb] = [batch]

                list = []
                for vb in vbDict.iterkeys():
                    list.append(vbDict[vb])

                output.append(list)

            return output

        cache = {}
        # Map '<effect>_<vertex stride>' to the matching intersection PTX.
        programs = {'skinned_tripleglowv3_48': 'triangle48',
         'singlev3_48': 'triangle48',
         'singleheatv3_48': 'triangle48',
         'tripleglowv3_40': 'triangle40',
         'singleheatv3_40': 'triangle40',
         'singlefresnelreflectionwithglow_56': 'triangle56',
         'doublefresnelreflectionwithglow_56': 'triangle56',
         'tripleglowoilv3_80': 'triangle80'}
        if False:
            # NOTE(review): nullintersect/nullbounds are only created in this
            # dead branch, yet the unknown-effect path below uses them --
            # that path would raise NameError if ever hit; verify.
            nullintersect = trinity.Tr2OptixProgram(path + 'nullgeometry.ptx', 'intersect')
            nullbounds = trinity.Tr2OptixProgram(path + 'nullgeometry.ptx', 'bounds')
            everything.append(nullintersect)
            everything.append(nullbounds)
        mylogOK = set({})
        mylogFail = set({})
        # Effect parameters with these names are sRGB->linear converted
        # during CopyParametersToContext.
        linearNames = set({})
        linearNames.add('MaterialDiffuseColor')
        linearNames.add('MaterialReflectionColor')
        linearNames.add('MaskDiffuseColor')
        linearNames.add('MaskReflectionColor')
        linearNames.add('SubMaskDiffuseColor')
        linearNames.add('SubMaskReflectionColor')
        linearNames.add('GlowColor')
        topScene = trinity.Tr2OptixGroup()
        interopSamplerCache = {}
        for dynamic in scene.objects:
            # Only ships and stations are converted.
            if dynamic.__typename__ not in ('EveShip2', 'EveStation2'):
                continue
            model = dynamic
            if model.highDetailMesh is None or model.highDetailMesh.object is None:
                LogWarn('ship has no high detail meshes')
                continue
            skinnedOptix = optix.CreateFromEveSpaceObject2(model, 0, '', '', '')
            everything.append(skinnedOptix)
            optixBatches = skinnedOptix[0]
            # Remembered so RefreshMatrices can update transforms later.
            self.objectsToRefresh[model] = skinnedOptix
            sorted = GroupByVertexBuffer(optixBatches)
            groups = []
            for batchType in range(len(optixBatches)):
                isOpaque = batchType == 0
                vbBatches = sorted[batchType]
                for batches in vbBatches:
                    groupChildren = []
                    for batch in batches:
                        # Reduce 'res:/.../foo.fx' to bare effect name 'foo'.
                        effect = batch[1].effectFilePath.lower()
                        effect = effect[effect.rfind('/') + 1:]
                        effect = effect[:effect.rfind('.fx')]
                        # batch[8] is used as the vertex stride key -- TODO confirm.
                        ptx = programs.get(effect + '_' + str(batch[8]), '')
                        if ptx == '':
                            mylogFail.add(effect)
                            batch[0].SetIntersectProgram(nullintersect)
                            batch[0].SetBoundsProgram(nullbounds)
                            continue
                        mylogOK.add(effect)
                        # Intersection programs are shared per stride via 'cache'.
                        intersect, bounds = cache.get(ptx, (None, None))
                        if intersect is None:
                            intersect = trinity.Tr2OptixProgram(path + ptx + '.ptx', 'intersect')
                            bounds = trinity.Tr2OptixProgram(path + ptx + '.ptx', 'bounds')
                            cache[ptx] = (intersect, bounds)
                        batch[0].SetIntersectProgram(intersect)
                        batch[0].SetBoundsProgram(bounds)
                        batchGeometryInstance = trinity.Tr2OptixGeometryInstance()
                        everything.append(batchGeometryInstance)
                        batchGeometryInstance.SetGeometry(batch[0])
                        if True:
                            material = self.CachedCreateMaterial(path, effect)
                            if material is None:
                                material = viz_material
                        else:
                            material = viz_material
                        batchGeometryInstance.SetMaterial(material)
                        SkinRaytracingTools.CopyParametersToContext(batch[1], batchGeometryInstance, linearNames)
                        groupChildren.append(batchGeometryInstance)
                        samplers = SkinRaytracingTools.InteropAllTexturesFromEffect(optix, batch[1], waitForFinish=True, nameTranslation=nameTranslation, scope=batchGeometryInstance, cache=interopSamplerCache)
                        everything.append(samplers)

                    # One geometry group (with its own BVH) per vertex buffer.
                    group = trinity.Tr2OptixGeometryGroup()
                    group.SetChildCount(len(groupChildren))
                    for x in xrange(len(groupChildren)):
                        group.SetChild(x, groupChildren[x])

                    group.SetAcceleration('Bvh', 'Bvh')
                    self.objectsToMarkDirty.append(group)
                    groups.append(group)

            everything.append(cache)
            # Append this object's groups after any children already in the scene.
            baseOffset = topScene.GetChildCount()
            topScene.SetChildCount(baseOffset + len(groups))
            for x in xrange(len(groups)):
                topScene.SetChild(baseOffset + x, groups[x])

            everything.append(groups)

        if False:
            # Debug: a single red sphere primitive in the scene.
            sphereGeometry = trinity.Tr2OptixGeometry()
            sphereGeometry.InitializeFromProgram(path + 'sphere_program.ptx', 'intersect', 'bounds')
            sphereGeometry.SetPrimitiveCount(1)
            everything.append(sphereGeometry)
            sphereInstance = trinity.Tr2OptixGeometryInstance()
            sphereInstance.SetGeometry(sphereGeometry)
            sphereInstance.SetMaterial(viz_material)
            sphereInstance.SetFloat4('pos_r', 0, 0, 0, 100)
            sphereInstance.SetFloat4('color_watt', 1, 0, 0, 1)
            everything.append(sphereInstance)
            group = trinity.Tr2OptixGeometryGroup()
            group.SetChildCount(1)
            group.SetChild(0, sphereInstance)
            group.SetAcceleration('Bvh', 'Bvh')
            topScene.SetChildCount(topScene.GetChildCount() + 1)
            topScene.SetChild(topScene.GetChildCount() - 1, group)
        everything.append(topScene)
        topScene.SetAcceleration('Bvh', 'Bvh')
        self.objectsToMarkDirty.append(topScene)
        optix.SetGroup('top_scene', topScene)
        optix.SetGroup('shadow_casters', topScene)
        if len(mylogOK) > 0:
            LogInfo('Converted succesfully:', str(mylogOK))
        else:
            LogWarn('No effects converted succesfully!')
        if len(mylogFail) > 0:
            LogWarn('Failed to convert:', str(mylogFail))
        # Sun/ambient/fog constants, linearized for the shaders.
        if type(scene) == trinity.EveSpaceScene:
            c = SkinRaytracingTools.SafeLinearize(scene.sunDiffuseColor)
            optix.SetFloat4('SunDiffuseColor', c[0], c[1], c[2], c[3])
            c = scene.sunDirection
            # Negated: shader wants direction towards the sun.
            optix.SetFloat4('SunDirWorld', -c[0], -c[1], -c[2], 0)
            c = SkinRaytracingTools.SafeLinearize(scene.ambientColor)
            optix.SetFloat4('SceneAmbientColor', c[0], c[1], c[2], c[3])
            c = SkinRaytracingTools.SafeLinearize(scene.fogColor)
            optix.SetFloat4('SceneFogColor', c[0], c[1], c[2], c[3])
        LogInfo('scene interop OK', time.clock() - start, 'seconds')
        start = time.clock()
        # NOTE(review): 'light' is configured but the buffer below is built
        # from an empty list, so no explicit lights reach the tracer.
        light = trinity.Tr2InteriorLightSource()
        if True:
            wattage = 2000000
            light.color = (1,
             1,
             1,
             wattage)
            light.radius = 50
            light.position = (200, 500, -300)
        else:
            wattage = 10000000
            light.color = (1,
             1,
             1,
             wattage)
            light.radius = 1000
            light.position = (0, 0, 0)
        bufEveLights = SkinRaytracingTools.CreateBufferForLights([], useHdrProbe, preserveAlpha=True)
        optix.SetBuffer('trinity_lights', bufEveLights)
        LogInfo('lights interop OK', time.clock() - start, 'seconds')
        if False:
            # Debug: emissive sphere visualizing the light source.
            sphereGeometry = trinity.Tr2OptixGeometry()
            sphereGeometry.InitializeFromProgram(path + 'sphere_program.ptx', 'intersect', 'bounds')
            sphereGeometry.SetPrimitiveCount(1)
            sphereMaterial = trinity.Tr2OptixMaterial()
            sphereShader = trinity.Tr2OptixProgram(path + 'sphere_program.ptx', 'closest_hit_radiance')
            sphereMaterial.SetClosestHit(0, sphereShader)
            sphereInstance = trinity.Tr2OptixGeometryInstance()
            sphereInstance.SetGeometry(sphereGeometry)
            sphereInstance.SetMaterial(sphereMaterial)
            sphereInstance.SetFloat4('pos_r', light.position[0], light.position[1], light.position[2], light.radius)
            sphereInstance.SetFloat4('color_watt', light.color[0], light.color[1], light.color[2], light.color[3])
            n = topScene.GetChildCount()
            topScene.SetChildCount(n + 1)
            sphereGroup = trinity.Tr2OptixGeometryGroup()
            sphereGroup.SetChildCount(1)
            sphereGroup.SetChild(0, sphereInstance)
            sphereGroup.SetAcceleration('Bvh', 'Bvh')
            topScene.SetChild(n, sphereGroup)
        start = time.clock()
        optix.SetRayGenerationProgram(0, self.raygen)
        optix.SetEntryPointCount(1)
        miss = None
        if not useHdrProbe:
            miss = trinity.Tr2OptixProgram(path + 'eve_miss.ptx', 'miss')
        else:
            miss = trinity.Tr2OptixProgram(path + 'eve_miss_probe.ptx', 'miss')
        # Miss program bound to ray type 3 (the bounce ray) -- TODO confirm
        # the other ray types intentionally have no miss program.
        optix.SetMissProgram(3, miss)
        optix.SetFloat3('bg_color', 1.0, 0, 0)
        everything.append(miss)
        if False:
            exception = trinity.Tr2OptixProgram(path + 'eve_miss.ptx', 'exception')
            optix.SetExceptionProgram(0, exception)
            everything.append(exception)
        optix.SetStackSize(4096)
        self.everything = everything
        SkinRaytracingTools.SetOptixMatrixFromTrinity(optix, 'clipToWorld', self.width / float(self.height))
        LogInfo('general setup OK', time.clock() - start, 'seconds')
        optix.ReportObjectCounts()
        start = time.clock()
        optix.Compile()
        LogInfo('compile OK', time.clock() - start, 'seconds')
        start = time.clock()
        optix.Validate()
        LogInfo('validate OK', time.clock() - start, 'seconds')
        start = time.clock()
        # Zero-sized warm-up launch builds the acceleration structures.
        optix.Run(0, 0, 0)
        LogInfo('BVH OK', time.clock() - start, 'seconds')
        start = time.clock()
        if False:
            self.blitfx.SetTexture(outputTexture, self.highpassRT, self.filteredRT)
        else:
            self.blitfx.SetTexture(outputTexture, outputTexture, outputTexture)
        # Build the recurring render job: raytrace, then blit to screen.
        rj = trinity.CreateRenderJob('ShipOptixRenderer')
        self.AddCallback(ShipOptixRenderer.RaytraceFrame, 'Raytrace Frame', rj)
        rj.SetStdRndStates(trinity.RM_FULLSCREEN).name = 'fullscreen state'
        if False:
            # Disabled bloom chain: highpass -> horizontal -> vertical blur.
            rj.SetRenderTarget(self.highpassRT.wrappedRenderTarget).name = ' SetRT highpassRT'
            rj.RenderEffect(self.blitfx.highpassEffect).name = ' high pass'
            rj.SetRenderTarget(self.filteredRT.wrappedRenderTarget).name = ' SetRT filteredRT'
            rj.RenderEffect(self.blitfx.gaussianHorizEffect).name = ' horizontal blur'
            rj.SetRenderTarget(self.highpassRT.wrappedRenderTarget).name = ' SetRT highpassRT'
            rj.RenderEffect(self.blitfx.gaussianVertEffect).name = ' vertical blur'
            rj.SetStdRndStates(trinity.RM_FULLSCREEN).name = 'fullscreen state'
        rj.RenderEffect(self.blitfx.effect).name = '  blit'
        # If the 'TrinityPanel:View1' job exists (tools UI), splice our job
        # into it just before its final-RT step; otherwise run standalone.
        tp2 = None
        for job in trinity.renderJobs.recurring:
            if job.name == 'TrinityPanel:View1':
                tp2 = job

        if tp2 is None:
            rj.ScheduleRecurring(insertFront=False)
        else:
            final = None
            for step in tp2.steps:
                if step.name == 'SET_FINAL_RT':
                    final = step
                    break

            if final is not None:
                tp2.steps.insert(tp2.steps.index(final), trinity.TriStepRunJob(rj))
            else:
                tp2.steps.append(trinity.TriStepRunJob(rj))
        self.renderJob = rj
        LogInfo('final setup OK', time.clock() - start, 'seconds')
        FullOptixRenderer.EnablePaperDollJobs(False)

    def ApplySettings(self):
        """Push current self.settings into the OptiX context and restart accumulation."""
        self.optix.SetFloat('light_size', self.settings['light_size'])
        self.optix.SetFloat3('depthOfField', 1.0, self.settings['lens_radius'], 0)
        self.optix.SetFloat('HairShadows', self.settings['HairShadows'])
        # Divide by pi: presumably intensity -> radiance; see shader.
        self.optix.SetFloat('EnvMapBoost', self.settings['EnvMapBoost'] / 3.1415927)
        self.previousVP.Identity()

    def SetLensRadius(self, lens_radius):
        # Depth-of-field aperture radius; applied immediately.
        self.settings['lens_radius'] = lens_radius
        self.ApplySettings()
self.ApplySettings()\n\n def SetLensFocalDistance(self, lens_focal_distance):\n if lens_focal_distance <= 0:\n self.settings.pop('lens_focal_distance', 0)\n else:\n self.settings['lens_focal_distance'] = lens_focal_distance\n self.ApplySettings()\n\n def SetLightSize(self, light_size):\n self.settings['light_size'] = light_size\n self.ApplySettings()\n\n def SetHairShadowsEnabled(self, enabled):\n self.settings['HairShadows'] = float(enabled)\n self.ApplySettings()\n\n def SetBackgroundIntensity(self, intensity):\n self.settings['EnvMapBoost'] = intensity\n self.ApplySettings()\n\n def __init__(self, scene = None, backgroundBitmap = None, memento = None, beardLength = (0.01, 0.01), beardGravity = 0.0005, outputWidth = None, outputHeight = None, asyncSetup = True, listenForUpdate = True):\n LogInfo('init', self)\n blue.motherLode.maxMemUsage = 0\n blue.resMan.ClearAllCachedObjects()\n self.framecount = 0\n self.listenForUpdate = listenForUpdate\n self.everything = None\n self.objectsToRefresh = {}\n self.objectsToMarkDirty = []\n if memento is not None:\n self.settings = memento\n else:\n self.settings = {}\n self.settings['light_size'] = 0.125\n self.settings['lens_radius'] = 0.001\n self.settings['HairShadows'] = 1.0\n self.settings['EnvMapBoost'] = 1.0\n self.settings['backgroundBitmap'] = backgroundBitmap if backgroundBitmap is not None else 'res:/texture/global/red_blue_ramp.dds'\n self.settings['beardLength'] = beardLength\n self.settings['beardGravity'] = beardGravity\n if outputWidth is not None:\n self.settings['outputWidth'] = outputWidth\n if outputHeight is not None:\n self.settings['outputHeight'] = outputHeight\n if asyncSetup:\n uthread.new(self._DoInit, scene=scene)\n else:\n self._DoInit(scene=scene)\n\n def GetMemento(self):\n return self.settings\n\n def __del__(self):\n LogInfo('deleting', self)\n if hasattr(self, 'renderJob'):\n self.renderJob.UnscheduleRecurring()\n self.renderJob = None\n del self.any_hit_shadow\n del self.raygen\n del 
self.rayCounter\n del self.oit\n del self.shBuffer\n del self.outputBuffer\n del self.everything\n del self.objectsToRefresh\n del self.objectsToMarkDirty\n self.optix.ClearObjects()\n LogInfo('Post-cleanup leak check:')\n self.optix.ReportObjectCounts()\n FullOptixRenderer.EnablePaperDollJobs(True)\n\n def RefreshMatrices(self):\n for ship, optixList in self.objectsToRefresh.iteritems():\n self.optix.RefreshMatrices(ship, optixList)\n\n for dirty in self.objectsToMarkDirty:\n dirty.MarkDirty()\n\n self.ApplySettings()\n LogInfo('Refreshed')\n\n @staticmethod\n def Pause():\n if FullOptixRenderer.instance is not None:\n FullOptixRenderer.instance.renderJob.UnscheduleRecurring()\n\n @staticmethod\n def NotifyUpdate():\n if FullOptixRenderer.instance is not None and FullOptixRenderer.instance.listenForUpdate:\n LogInfo('NotifyUpdate, restarting', FullOptixRenderer.instance)\n memento = FullOptixRenderer.instance.GetMemento()\n FullOptixRenderer.instance = None\n FullOptixRenderer.instance = FullOptixRenderer(memento=memento)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import cv2

# Load the source image (OpenCV reads in BGR channel order).
img = cv2.imread('Chapter1/resources/jacuzi.jpg')

# Canny expects a single-channel image for the textbook pipeline.
# The original computed this grayscale image but then ran Canny on the
# color image, leaving imgGrey unused — feed the grayscale version in.
imgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Hysteresis thresholds: 240 (low) / 250 (high).
imgCanny = cv2.Canny(imgGrey, 240, 250)

cv2.imshow("output", imgCanny)
cv2.waitKey(0)  # block until a key press so the window stays visible
|
normal
|
{
"blob_id": "292cfecb701ecc179381d4453063aff532a0e877",
"index": 8961,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('output', imgCanny)\ncv2.waitKey(0)\n",
"step-3": "<mask token>\nimg = cv2.imread('Chapter1/resources/jacuzi.jpg')\nimgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimgCanny = cv2.Canny(img, 240, 250)\ncv2.imshow('output', imgCanny)\ncv2.waitKey(0)\n",
"step-4": "import cv2\nimg = cv2.imread('Chapter1/resources/jacuzi.jpg')\nimgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimgCanny = cv2.Canny(img, 240, 250)\ncv2.imshow('output', imgCanny)\ncv2.waitKey(0)\n",
"step-5": "import cv2\n\nimg = cv2.imread('Chapter1/resources/jacuzi.jpg')\nimgGrey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nimgCanny = cv2.Canny(img,240,250)\ncv2.imshow(\"output\",imgCanny)\ncv2.waitKey(0)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Fuel efficiency: distance travelled (km) divided by fuel consumed
# (litres), reported to three decimal places.
distance_km = float(input())
fuel_litres = float(input())
efficiency = distance_km / fuel_litres
print(round(efficiency, 3), "km/l")
|
normal
|
{
"blob_id": "db33f7386d1eacbfbfd29aa367df310c557ae864",
"index": 8520,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(round(km / cg, 3), 'km/l')\n",
"step-3": "km = float(input())\ncg = float(input())\nprint(round(km / cg, 3), 'km/l')\n",
"step-4": "km=float(input())\ncg=float(input())\nprint(round(km/cg,3),\"km/l\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2015 Stephen P. Smith
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time, math
import RPi.GPIO as GPIO
#import numpy
class max31865(object):
    """Read temperature from a MAX31865 RTD-to-digital converter using
    bit-banged SPI over Raspberry Pi GPIO (any BCM pins can be used).

    Temperature is derived from the Callendar-Van Dusen equation: the
    quadratic-formula solution (ignoring the 3rd/4th-degree terms) is
    used for T >= 0 degC, and a straight-line approximation below 0 degC.
    Numpy could solve the full polynomial but slows each reading down.
    """

    def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):
        # BCM pin numbers for the software (bit-banged) SPI bus.
        self.csPin = csPin
        self.misoPin = misoPin
        self.mosiPin = mosiPin
        self.clkPin = clkPin
        self.setupGPIO()

    def setupGPIO(self):
        """Configure the four pins and drive the bus to its idle state."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.csPin, GPIO.OUT)
        GPIO.setup(self.misoPin, GPIO.IN)
        GPIO.setup(self.mosiPin, GPIO.OUT)
        GPIO.setup(self.clkPin, GPIO.OUT)
        # Idle state: chip deselected (CS high), clock low, MOSI low.
        GPIO.output(self.csPin, GPIO.HIGH)
        GPIO.output(self.clkPin, GPIO.LOW)
        GPIO.output(self.mosiPin, GPIO.LOW)

    def readTemp(self):
        """Trigger a one-shot conversion and return the temperature in degC.

        Raises:
            FaultError: if the status register reports a threshold or
                over/undervoltage fault (e.g. an open or shorted cable).
        """
        # Config register value 0xB2:
        #   bit 7: Vbias             -> 1 (ON)
        #   bit 6: conversion mode   -> 0 (MANUAL)
        #   bit 5: 1-shot            -> 1 (ON)
        #   bit 4: 3-wire select     -> 1 (3-wire config)
        #   bits 3-2: fault cycle    -> 0 (none)
        #   bit 1: fault status clear-> 1 (clear any fault)
        #   bit 0: 50/60 Hz filter   -> 0 (60 Hz)
        # (0xD2 would select continuous auto conversion at 60 Hz.)
        self.writeRegister(0, 0xB2)

        # Conversion time is below 100 ms; give it the full window.
        time.sleep(.1)

        # Read all 8 registers in one burst.
        out = self.readRegisters(0, 8)

        conf_reg = out[0]

        [rtd_msb, rtd_lsb] = [out[1], out[2]]
        # 15-bit ADC code: drop the fault flag carried in the LSB.
        rtd_ADC_Code = ((rtd_msb << 8) | rtd_lsb) >> 1

        temp_C = self.calcPT100Temp(rtd_ADC_Code)

        [hft_msb, hft_lsb] = [out[3], out[4]]
        hft = ((hft_msb << 8) | hft_lsb) >> 1

        [lft_msb, lft_lsb] = [out[5], out[6]]
        lft = ((lft_msb << 8) | lft_lsb) >> 1

        # Status register bits (a 10 Mohm resistor on the breakout board
        # helps detect cable faults):
        #   bit 7: RTD high threshold / cable fault open
        #   bit 6: RTD low threshold / cable fault short
        #   bit 2: overvoltage / undervoltage fault
        status = out[7]

        # BUGFIX: (status & 0x80) evaluates to 0 or 0x80 — never 1 — so the
        # original '== 1' comparisons could never detect a fault.  Test the
        # masked bit for truthiness instead.
        if status & 0x80:
            raise FaultError("High threshold limit (Cable fault/open)")
        if status & 0x40:
            raise FaultError("Low threshold limit (Cable fault/short)")
        if status & 0x04:
            raise FaultError("Overvoltage or Undervoltage Error")

        return temp_C

    def writeRegister(self, regNum, dataByte):
        """Write one data byte to register *regNum*."""
        GPIO.output(self.csPin, GPIO.LOW)
        # 0x8x means 'write register value'.
        addressByte = 0x80 | regNum
        # Address byte first, then the data byte.
        self.sendByte(addressByte)
        self.sendByte(dataByte)
        GPIO.output(self.csPin, GPIO.HIGH)

    def readRegisters(self, regNumStart, numRegisters):
        """Read *numRegisters* consecutive bytes starting at *regNumStart*."""
        out = []
        GPIO.output(self.csPin, GPIO.LOW)
        # 0x0x (write bit clear) means 'read register value'.
        self.sendByte(regNumStart)
        for byte in range(numRegisters):
            data = self.recvByte()
            out.append(data)
        GPIO.output(self.csPin, GPIO.HIGH)
        return out

    def sendByte(self, byte):
        """Clock one byte out on MOSI, MSB first."""
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            if byte & 0x80:
                GPIO.output(self.mosiPin, GPIO.HIGH)
            else:
                GPIO.output(self.mosiPin, GPIO.LOW)
            byte <<= 1
            GPIO.output(self.clkPin, GPIO.LOW)

    def recvByte(self):
        """Clock one byte in from MISO, MSB first."""
        byte = 0x00
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            byte <<= 1
            if GPIO.input(self.misoPin):
                byte |= 0x1
            GPIO.output(self.clkPin, GPIO.LOW)
        return byte

    def calcPT100Temp(self, RTD_ADC_Code):
        """Convert a 15-bit RTD ADC code to degrees Celsius.

        Uses the quadratic solution of the Callendar-Van Dusen equation
        (valid for 0 <= T <= 850 degC); when that result is negative,
        falls back to the straight-line approximation.
        """
        R_REF = 430.0   # reference resistor on the breakout board
        Res0 = 100.0    # PT100 resistance at 0 degC
        # Callendar-Van Dusen coefficients; c applies only below 0 degC
        # and is unused by the quadratic approximation below.
        a = .00390830
        b = -.000000577500
        c = -0.00000000000418301
        Res_RTD = (RTD_ADC_Code * R_REF) / 32768.0  # measured PT100 resistance
        # Res_RTD = Res0 * (1 + a*T + b*T**2)   (c term dropped)
        # solved for T with the quadratic formula.
        temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b * Res0) * (Res0 - Res_RTD))
        temp_C = temp_C / (2 * (b * Res0))
        temp_C_line = (RTD_ADC_Code / 32.0) - 256.0
        if temp_C < 0:
            # Straight-line approximation below 0 degC; the full cubic
            # could be solved with numpy.roots but is much slower.
            temp_C = temp_C_line
        return temp_C
class FaultError(Exception):
	"""Raised when the MAX31865 status register reports a fault."""
	pass
if __name__ == "__main__":
    try:
        # BCM pin assignments for the software SPI bus.
        csPin = 24
        misoPin = 21
        mosiPin = 17
        clkPin = 23
        # Named 'sensor' — the original 'max' shadowed the max() builtin.
        sensor = max31865(csPin, misoPin, mosiPin, clkPin)
        while True:
            tempC = sensor.readTemp()
            print(tempC)
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        # Always release the GPIO pins — the original skipped cleanup when
        # a non-KeyboardInterrupt exception (e.g. FaultError) propagated.
        GPIO.cleanup()
|
normal
|
{
"blob_id": "5d92c68e0fe7f37d4719fb9ca4274b29ff1cbb43",
"index": 4699,
"step-1": "<mask token>\n\n\nclass max31865(object):\n <mask token>\n\n def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):\n self.csPin = csPin\n self.misoPin = misoPin\n self.mosiPin = mosiPin\n self.clkPin = clkPin\n self.setupGPIO()\n <mask token>\n <mask token>\n\n def writeRegister(self, regNum, dataByte):\n GPIO.output(self.csPin, GPIO.LOW)\n addressByte = 128 | regNum\n self.sendByte(addressByte)\n self.sendByte(dataByte)\n GPIO.output(self.csPin, GPIO.HIGH)\n\n def readRegisters(self, regNumStart, numRegisters):\n out = []\n GPIO.output(self.csPin, GPIO.LOW)\n self.sendByte(regNumStart)\n for byte in range(numRegisters):\n data = self.recvByte()\n out.append(data)\n GPIO.output(self.csPin, GPIO.HIGH)\n return out\n\n def sendByte(self, byte):\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n if byte & 128:\n GPIO.output(self.mosiPin, GPIO.HIGH)\n else:\n GPIO.output(self.mosiPin, GPIO.LOW)\n byte <<= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n\n def recvByte(self):\n byte = 0\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n byte <<= 1\n if GPIO.input(self.misoPin):\n byte |= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n return byte\n\n def calcPT100Temp(self, RTD_ADC_Code):\n R_REF = 430.0\n Res0 = 100.0\n a = 0.0039083\n b = -5.775e-07\n c = -4.18301e-12\n Res_RTD = RTD_ADC_Code * R_REF / 32768.0\n temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *\n Res0) * (Res0 - Res_RTD))\n temp_C = temp_C / (2 * (b * Res0))\n temp_C_line = RTD_ADC_Code / 32.0 - 256.0\n if temp_C < 0:\n temp_C = temp_C_line\n return temp_C\n\n\nclass FaultError(Exception):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass max31865(object):\n <mask token>\n\n def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):\n self.csPin = csPin\n self.misoPin = misoPin\n self.mosiPin = mosiPin\n self.clkPin = clkPin\n self.setupGPIO()\n\n def setupGPIO(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.csPin, GPIO.OUT)\n GPIO.setup(self.misoPin, GPIO.IN)\n GPIO.setup(self.mosiPin, GPIO.OUT)\n GPIO.setup(self.clkPin, GPIO.OUT)\n GPIO.output(self.csPin, GPIO.HIGH)\n GPIO.output(self.clkPin, GPIO.LOW)\n GPIO.output(self.mosiPin, GPIO.LOW)\n <mask token>\n\n def writeRegister(self, regNum, dataByte):\n GPIO.output(self.csPin, GPIO.LOW)\n addressByte = 128 | regNum\n self.sendByte(addressByte)\n self.sendByte(dataByte)\n GPIO.output(self.csPin, GPIO.HIGH)\n\n def readRegisters(self, regNumStart, numRegisters):\n out = []\n GPIO.output(self.csPin, GPIO.LOW)\n self.sendByte(regNumStart)\n for byte in range(numRegisters):\n data = self.recvByte()\n out.append(data)\n GPIO.output(self.csPin, GPIO.HIGH)\n return out\n\n def sendByte(self, byte):\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n if byte & 128:\n GPIO.output(self.mosiPin, GPIO.HIGH)\n else:\n GPIO.output(self.mosiPin, GPIO.LOW)\n byte <<= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n\n def recvByte(self):\n byte = 0\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n byte <<= 1\n if GPIO.input(self.misoPin):\n byte |= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n return byte\n\n def calcPT100Temp(self, RTD_ADC_Code):\n R_REF = 430.0\n Res0 = 100.0\n a = 0.0039083\n b = -5.775e-07\n c = -4.18301e-12\n Res_RTD = RTD_ADC_Code * R_REF / 32768.0\n temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *\n Res0) * (Res0 - Res_RTD))\n temp_C = temp_C / (2 * (b * Res0))\n temp_C_line = RTD_ADC_Code / 32.0 - 256.0\n if temp_C < 0:\n temp_C = temp_C_line\n return temp_C\n\n\nclass FaultError(Exception):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass max31865(object):\n \"\"\"Reading Temperature from the MAX31865 with GPIO using \n\t the Raspberry Pi. Any pins can be used.\n\t Numpy can be used to completely solve the Callendar-Van Dusen equation \n\t but it slows the temp reading down. I commented it out in the code. \n\t Both the quadratic formula using Callendar-Van Dusen equation (ignoring the\n\t 3rd and 4th degree parts of the polynomial) and the straight line approx.\n\t temperature is calculated with the quadratic formula one being the most accurate.\n\t\"\"\"\n\n def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):\n self.csPin = csPin\n self.misoPin = misoPin\n self.mosiPin = mosiPin\n self.clkPin = clkPin\n self.setupGPIO()\n\n def setupGPIO(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.csPin, GPIO.OUT)\n GPIO.setup(self.misoPin, GPIO.IN)\n GPIO.setup(self.mosiPin, GPIO.OUT)\n GPIO.setup(self.clkPin, GPIO.OUT)\n GPIO.output(self.csPin, GPIO.HIGH)\n GPIO.output(self.clkPin, GPIO.LOW)\n GPIO.output(self.mosiPin, GPIO.LOW)\n\n def readTemp(self):\n self.writeRegister(0, 178)\n time.sleep(0.1)\n out = self.readRegisters(0, 8)\n conf_reg = out[0]\n [rtd_msb, rtd_lsb] = [out[1], out[2]]\n rtd_ADC_Code = (rtd_msb << 8 | rtd_lsb) >> 1\n temp_C = self.calcPT100Temp(rtd_ADC_Code)\n [hft_msb, hft_lsb] = [out[3], out[4]]\n hft = (hft_msb << 8 | hft_lsb) >> 1\n [lft_msb, lft_lsb] = [out[5], out[6]]\n lft = (lft_msb << 8 | lft_lsb) >> 1\n status = out[7]\n if status & 128 == 1:\n raise FaultError('High threshold limit (Cable fault/open)')\n if status & 64 == 1:\n raise FaultError('Low threshold limit (Cable fault/short)')\n if status & 4 == 1:\n raise FaultError('Overvoltage or Undervoltage Error')\n return temp_C\n\n def writeRegister(self, regNum, dataByte):\n GPIO.output(self.csPin, GPIO.LOW)\n addressByte = 128 | regNum\n self.sendByte(addressByte)\n self.sendByte(dataByte)\n GPIO.output(self.csPin, GPIO.HIGH)\n\n def 
readRegisters(self, regNumStart, numRegisters):\n out = []\n GPIO.output(self.csPin, GPIO.LOW)\n self.sendByte(regNumStart)\n for byte in range(numRegisters):\n data = self.recvByte()\n out.append(data)\n GPIO.output(self.csPin, GPIO.HIGH)\n return out\n\n def sendByte(self, byte):\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n if byte & 128:\n GPIO.output(self.mosiPin, GPIO.HIGH)\n else:\n GPIO.output(self.mosiPin, GPIO.LOW)\n byte <<= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n\n def recvByte(self):\n byte = 0\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n byte <<= 1\n if GPIO.input(self.misoPin):\n byte |= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n return byte\n\n def calcPT100Temp(self, RTD_ADC_Code):\n R_REF = 430.0\n Res0 = 100.0\n a = 0.0039083\n b = -5.775e-07\n c = -4.18301e-12\n Res_RTD = RTD_ADC_Code * R_REF / 32768.0\n temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *\n Res0) * (Res0 - Res_RTD))\n temp_C = temp_C / (2 * (b * Res0))\n temp_C_line = RTD_ADC_Code / 32.0 - 256.0\n if temp_C < 0:\n temp_C = temp_C_line\n return temp_C\n\n\nclass FaultError(Exception):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass max31865(object):\n \"\"\"Reading Temperature from the MAX31865 with GPIO using \n\t the Raspberry Pi. Any pins can be used.\n\t Numpy can be used to completely solve the Callendar-Van Dusen equation \n\t but it slows the temp reading down. I commented it out in the code. \n\t Both the quadratic formula using Callendar-Van Dusen equation (ignoring the\n\t 3rd and 4th degree parts of the polynomial) and the straight line approx.\n\t temperature is calculated with the quadratic formula one being the most accurate.\n\t\"\"\"\n\n def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):\n self.csPin = csPin\n self.misoPin = misoPin\n self.mosiPin = mosiPin\n self.clkPin = clkPin\n self.setupGPIO()\n\n def setupGPIO(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.csPin, GPIO.OUT)\n GPIO.setup(self.misoPin, GPIO.IN)\n GPIO.setup(self.mosiPin, GPIO.OUT)\n GPIO.setup(self.clkPin, GPIO.OUT)\n GPIO.output(self.csPin, GPIO.HIGH)\n GPIO.output(self.clkPin, GPIO.LOW)\n GPIO.output(self.mosiPin, GPIO.LOW)\n\n def readTemp(self):\n self.writeRegister(0, 178)\n time.sleep(0.1)\n out = self.readRegisters(0, 8)\n conf_reg = out[0]\n [rtd_msb, rtd_lsb] = [out[1], out[2]]\n rtd_ADC_Code = (rtd_msb << 8 | rtd_lsb) >> 1\n temp_C = self.calcPT100Temp(rtd_ADC_Code)\n [hft_msb, hft_lsb] = [out[3], out[4]]\n hft = (hft_msb << 8 | hft_lsb) >> 1\n [lft_msb, lft_lsb] = [out[5], out[6]]\n lft = (lft_msb << 8 | lft_lsb) >> 1\n status = out[7]\n if status & 128 == 1:\n raise FaultError('High threshold limit (Cable fault/open)')\n if status & 64 == 1:\n raise FaultError('Low threshold limit (Cable fault/short)')\n if status & 4 == 1:\n raise FaultError('Overvoltage or Undervoltage Error')\n return temp_C\n\n def writeRegister(self, regNum, dataByte):\n GPIO.output(self.csPin, GPIO.LOW)\n addressByte = 128 | regNum\n self.sendByte(addressByte)\n self.sendByte(dataByte)\n GPIO.output(self.csPin, GPIO.HIGH)\n\n def 
readRegisters(self, regNumStart, numRegisters):\n out = []\n GPIO.output(self.csPin, GPIO.LOW)\n self.sendByte(regNumStart)\n for byte in range(numRegisters):\n data = self.recvByte()\n out.append(data)\n GPIO.output(self.csPin, GPIO.HIGH)\n return out\n\n def sendByte(self, byte):\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n if byte & 128:\n GPIO.output(self.mosiPin, GPIO.HIGH)\n else:\n GPIO.output(self.mosiPin, GPIO.LOW)\n byte <<= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n\n def recvByte(self):\n byte = 0\n for bit in range(8):\n GPIO.output(self.clkPin, GPIO.HIGH)\n byte <<= 1\n if GPIO.input(self.misoPin):\n byte |= 1\n GPIO.output(self.clkPin, GPIO.LOW)\n return byte\n\n def calcPT100Temp(self, RTD_ADC_Code):\n R_REF = 430.0\n Res0 = 100.0\n a = 0.0039083\n b = -5.775e-07\n c = -4.18301e-12\n Res_RTD = RTD_ADC_Code * R_REF / 32768.0\n temp_C = -(a * Res0) + math.sqrt(a * a * Res0 * Res0 - 4 * (b *\n Res0) * (Res0 - Res_RTD))\n temp_C = temp_C / (2 * (b * Res0))\n temp_C_line = RTD_ADC_Code / 32.0 - 256.0\n if temp_C < 0:\n temp_C = temp_C_line\n return temp_C\n\n\nclass FaultError(Exception):\n pass\n\n\nif __name__ == '__main__':\n try:\n csPin = 24\n misoPin = 21\n mosiPin = 17\n clkPin = 23\n max = max31865(csPin, misoPin, mosiPin, clkPin)\n while True:\n tempC = max.readTemp()\n print(tempC)\n time.sleep(0.1)\n except KeyboardInterrupt:\n pass\n GPIO.cleanup()\n",
"step-5": "#!/usr/bin/python\n#The MIT License (MIT)\n#\n#Copyright (c) 2015 Stephen P. Smith\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\nimport time, math\nimport RPi.GPIO as GPIO\n#import numpy\n\nclass max31865(object):\n\t\"\"\"Reading Temperature from the MAX31865 with GPIO using \n\t the Raspberry Pi. Any pins can be used.\n\t Numpy can be used to completely solve the Callendar-Van Dusen equation \n\t but it slows the temp reading down. I commented it out in the code. 
\n\t Both the quadratic formula using Callendar-Van Dusen equation (ignoring the\n\t 3rd and 4th degree parts of the polynomial) and the straight line approx.\n\t temperature is calculated with the quadratic formula one being the most accurate.\n\t\"\"\"\n\tdef __init__(self, csPin = 8, misoPin = 9, mosiPin = 10, clkPin = 11):\n\t\tself.csPin = csPin\n\t\tself.misoPin = misoPin\n\t\tself.mosiPin = mosiPin\n\t\tself.clkPin = clkPin\n\t\tself.setupGPIO()\n\t\t\n\tdef setupGPIO(self):\n\t\tGPIO.setwarnings(False)\n\t\tGPIO.setmode(GPIO.BCM)\n\t\tGPIO.setup(self.csPin, GPIO.OUT)\n\t\tGPIO.setup(self.misoPin, GPIO.IN)\n\t\tGPIO.setup(self.mosiPin, GPIO.OUT)\n\t\tGPIO.setup(self.clkPin, GPIO.OUT)\n\n\t\tGPIO.output(self.csPin, GPIO.HIGH)\n\t\tGPIO.output(self.clkPin, GPIO.LOW)\n\t\tGPIO.output(self.mosiPin, GPIO.LOW)\n\n\tdef readTemp(self):\n\t\t#\n\t\t# b10000000 = 0x80\n\t\t# 0x8x to specify 'write register value'\n\t\t# 0xx0 to specify 'configuration register'\n\t\t#\n\t\t# 0b10110010 = 0xB2\n\t\t# Config Register\n\t\t# ---------------\n\t\t# bit 7: Vbias -> 1 (ON)\n\t\t# bit 6: Conversion Mode -> 0 (MANUAL)\n\t\t# bit5: 1-shot ->1 (ON)\n\t\t# bit4: 3-wire select -> 1 (3 wire config)\n\t\t# bits 3-2: fault detection cycle -> 0 (none)\n\t\t# bit 1: fault status clear -> 1 (clear any fault)\n\t\t# bit 0: 50/60 Hz filter select -> 0 (60Hz)\n\t\t#\n\t\t# 0b11010010 or 0xD2 for continuous auto conversion \n\t\t# at 60Hz (faster conversion)\n\t\t#\n\n\t\t#one shot\n\t\tself.writeRegister(0, 0xB2)\n\n\t\t# conversion time is less than 100ms\n\t\ttime.sleep(.1) #give it 100ms for conversion\n\n\t\t# read all registers\n\t\tout = self.readRegisters(0,8)\n\n\t\tconf_reg = out[0]\n\t\t# print(\"config register byte: %x\" % conf_reg)\n\n\t\t[rtd_msb, rtd_lsb] = [out[1], out[2]]\n\t\trtd_ADC_Code = (( rtd_msb << 8 ) | rtd_lsb ) >> 1\n\n\t\ttemp_C = self.calcPT100Temp(rtd_ADC_Code)\n\n\t\t[hft_msb, hft_lsb] = [out[3], out[4]]\n\t\thft = (( hft_msb << 8 ) | hft_lsb ) >> 1\n\t\t# 
print(\"high fault threshold: %d\" % hft)\n\n\t\t[lft_msb, lft_lsb] = [out[5], out[6]]\n\t\tlft = (( lft_msb << 8 ) | lft_lsb ) >> 1\n\t\t# print(\"low fault threshold: %d\" % lft)\n\n\t\tstatus = out[7]\n\t\t#\n\t\t# 10 Mohm resistor is on breakout board to help\n\t\t# detect cable faults\n\t\t# bit 7: RTD High Threshold / cable fault open \n\t\t# bit 6: RTD Low Threshold / cable fault short\n\t\t# bit 5: REFIN- > 0.85 x VBias -> must be requested\n\t\t# bit 4: REFIN- < 0.85 x VBias (FORCE- open) -> must be requested\n\t\t# bit 3: RTDIN- < 0.85 x VBias (FORCE- open) -> must be requested\n\t\t# bit 2: Overvoltage / undervoltage fault\n\t\t# bits 1,0 don't care\t\n\t\t#print \"Status byte: %x\" % status\n\n\t\tif ((status & 0x80) == 1):\n\t\t\traise FaultError(\"High threshold limit (Cable fault/open)\")\n\t\tif ((status & 0x40) == 1):\n\t\t\traise FaultError(\"Low threshold limit (Cable fault/short)\")\n\t\tif ((status & 0x04) == 1):\n\t\t\traise FaultError(\"Overvoltage or Undervoltage Error\") \n\n\t\treturn temp_C\n\t\t\n\tdef writeRegister(self, regNum, dataByte):\n\t\tGPIO.output(self.csPin, GPIO.LOW)\n\t\t\n\t\t# 0x8x to specify 'write register value'\n\t\taddressByte = 0x80 | regNum;\n\t\t\n\t\t# first byte is address byte\n\t\tself.sendByte(addressByte)\n\t\t# the rest are data bytes\n\t\tself.sendByte(dataByte)\n\n\t\tGPIO.output(self.csPin, GPIO.HIGH)\n\t\t\n\tdef readRegisters(self, regNumStart, numRegisters):\n\t\tout = []\n\t\tGPIO.output(self.csPin, GPIO.LOW)\n\t\t\n\t\t# 0x to specify 'read register value'\n\t\tself.sendByte(regNumStart)\n\t\t\n\t\tfor byte in range(numRegisters):\t\n\t\t\tdata = self.recvByte()\n\t\t\tout.append(data)\n\n\t\tGPIO.output(self.csPin, GPIO.HIGH)\n\t\treturn out\n\n\tdef sendByte(self,byte):\n\t\tfor bit in range(8):\n\t\t\tGPIO.output(self.clkPin, GPIO.HIGH)\n\t\t\tif (byte & 0x80):\n\t\t\t\tGPIO.output(self.mosiPin, GPIO.HIGH)\n\t\t\telse:\n\t\t\t\tGPIO.output(self.mosiPin, GPIO.LOW)\n\t\t\tbyte <<= 
1\n\t\t\tGPIO.output(self.clkPin, GPIO.LOW)\n\n\tdef recvByte(self):\n\t\tbyte = 0x00\n\t\tfor bit in range(8):\n\t\t\tGPIO.output(self.clkPin, GPIO.HIGH)\n\t\t\tbyte <<= 1\n\t\t\tif GPIO.input(self.misoPin):\n\t\t\t\tbyte |= 0x1\n\t\t\tGPIO.output(self.clkPin, GPIO.LOW)\n\t\treturn byte\t\n\t\n\tdef calcPT100Temp(self, RTD_ADC_Code):\n\t\tR_REF = 430.0 # Reference Resistor\n\t\tRes0 = 100.0; # Resistance at 0 degC for 400ohm R_Ref\n\t\ta = .00390830\n\t\tb = -.000000577500\n\t\t# c = -4.18301e-12 # for -200 <= T <= 0 (degC)\n\t\tc = -0.00000000000418301\n\t\t# c = 0 # for 0 <= T <= 850 (degC)\n\t\t#print(\"RTD ADC Code: %d\" % RTD_ADC_Code)\n\t\tRes_RTD = (RTD_ADC_Code * R_REF) / 32768.0 # PT100 Resistance\n\t\t#print(\"PT100 Resistance: %f ohms\" % Res_RTD)\n\t\t#\n\t\t# Callendar-Van Dusen equation\n\t\t# Res_RTD = Res0 * (1 + a*T + b*T**2 + c*(T-100)*T**3)\n\t\t# Res_RTD = Res0 + a*Res0*T + b*Res0*T**2 # c = 0\n\t\t# (c*Res0)T**4 - (c*Res0)*100*T**3 \n\t\t# + (b*Res0)*T**2 + (a*Res0)*T + (Res0 - Res_RTD) = 0\n\t\t#\n\t\t# quadratic formula:\n\t\t# for 0 <= T <= 850 (degC)\n\t\ttemp_C = -(a*Res0) + math.sqrt(a*a*Res0*Res0 - 4*(b*Res0)*(Res0 - Res_RTD))\n\t\ttemp_C = temp_C / (2*(b*Res0))\n\t\ttemp_C_line = (RTD_ADC_Code/32.0) - 256.0\n\t\t# removing numpy.roots will greatly speed things up\n\t\t#temp_C_numpy = numpy.roots([c*Res0, -c*Res0*100, b*Res0, a*Res0, (Res0 - Res_RTD)])\n\t\t#temp_C_numpy = abs(temp_C_numpy[-1])\n\t\t#print(\"Straight Line Approx. 
Temp: %f degC\" % temp_C_line)\n\t\t#print(\"Callendar-Van Dusen Temp (degC > 0): %f degC\" % temp_C)\n\t\t#print \"Solving Full Callendar-Van Dusen using numpy: %f\" % temp_C_numpy\n\t\tif (temp_C < 0): #use straight line approximation if less than 0\n\t\t\t# Can also use python lib numpy to solve cubic\n\t\t\t# Should never get here in this application\n\t\t\ttemp_C = temp_C_line\n\t\treturn temp_C\n\nclass FaultError(Exception):\n\tpass\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tcsPin = 24\n\t\tmisoPin = 21\n\t\tmosiPin = 17\n\t\tclkPin = 23\n\t\tmax = max31865(csPin,misoPin,mosiPin,clkPin)\n\t\twhile True:\n\t\t\ttempC = max.readTemp()\n\t\t\tprint(tempC)\n\t\t\ttime.sleep(0.1)\n\texcept KeyboardInterrupt:\n\t\tpass\n\tGPIO.cleanup()\n",
"step-ids": [
8,
9,
11,
12,
14
]
}
|
[
8,
9,
11,
12,
14
] |
"""
Author: Alan Danque
Date: 20210323
Purpose:Final Data Wrangling, strips html and punctuation.
"""
from sklearn.tree import export_graphviz
import pydot
import pickle
from pathlib import Path
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
import time
start_time = time.time()
pd.options.mode.chained_assignment = None # default='warn' # https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas
results_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')
results_dir.mkdir(parents=True, exist_ok=True)
filepath = "C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv"
data = pd.read_csv(filepath)
print(data.shape)
shape = data.shape
print('\nDataFrame Shape :', shape)
print('\nNumber of rows :', shape[0])
print('\nNumber of columns :', shape[1])
amsmodel1 = data
del amsmodel1['street_address']   # drop free-text column not usable by the model
amsmodel1.fillna(0, inplace=True) # impute every missing value with 0
print("Dataframe Loaded: --- %s seconds ---" % (time.time() - start_time))

# Load the previously trained random-forest model from disk.
# NOTE(review): pickle.load is only safe on trusted artifacts.
model_file = results_dir.joinpath('My3rdModel.pkl')
with open(model_file, 'rb') as f:
    rf = pickle.load(f)
print("Model Loaded: --- %s seconds ---" % (time.time() - start_time))

# Separate the target (price) from the feature matrix, keeping the
# column order so tree feature indices map back to names.
target = np.array(amsmodel1['price'])
features = amsmodel1.drop('price', axis=1)
feature_list = list(features.columns)
features = np.array(features)
print(feature_list)
print(features)
print("Features Loaded: --- %s seconds ---" % (time.time() - start_time))

# Export one estimator (tree #5) of the forest to Graphviz .dot and
# render it to PNG.  Requires Graphviz to be installed
# (https://graphviz.org/download/#windows).
tree = rf.estimators_[5]
tree_dot_file = results_dir.joinpath('tree.dot')
tree_png_file = results_dir.joinpath('tree.png')
# FIX: the original opened the .dot file without ever closing it, so
# pydot could read a partially flushed file; the with-block guarantees
# the export is fully written to disk before graph_from_dot_file runs.
with open(tree_dot_file, 'w') as dotfile:
    export_graphviz(tree, out_file=dotfile, feature_names=feature_list,
                    rounded=True, precision=1)
(graph,) = pydot.graph_from_dot_file(str(tree_dot_file))
graph.write_png(str(tree_png_file))
print("DecisionTree: --- %s seconds ---" % (time.time() - start_time))
# Previous full run: PyDot conversion completed in ~3804 seconds.
|
normal
|
{
"blob_id": "b9678b447bc6e7c4e928ffa6b8cd58639e41a801",
"index": 2688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nresults_dir.mkdir(parents=True, exist_ok=True)\n<mask token>\nprint(data.shape)\n<mask token>\nprint(\"\"\"\nDataFrame Shape :\"\"\", shape)\nprint(\"\"\"\nNumber of rows :\"\"\", shape[0])\nprint(\"\"\"\nNumber of columns :\"\"\", shape[1])\n<mask token>\ndel amsmodel1['street_address']\namsmodel1.fillna(0, inplace=True)\nprint('Dataframe Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\nwith open(model_file, 'rb') as f:\n rf = pickle.load(f)\nprint('Model Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\nprint(feature_list)\nprint(features)\nprint('Features Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\nexport_graphviz(tree, out_file=dotfile, feature_names=feature_list, rounded\n =True, precision=1)\n<mask token>\ngraph.write_png(tree_png_file)\nprint('DecisionTree: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\n",
"step-3": "<mask token>\nstart_time = time.time()\npd.options.mode.chained_assignment = None\nresults_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')\nresults_dir.mkdir(parents=True, exist_ok=True)\nfilepath = 'C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv'\ndata = pd.read_csv(filepath)\nprint(data.shape)\nshape = data.shape\nprint(\"\"\"\nDataFrame Shape :\"\"\", shape)\nprint(\"\"\"\nNumber of rows :\"\"\", shape[0])\nprint(\"\"\"\nNumber of columns :\"\"\", shape[1])\namsmodel1 = data\ndel amsmodel1['street_address']\namsmodel1.fillna(0, inplace=True)\nprint('Dataframe Loaded: --- %s seconds ---' % (time.time() - start_time))\nmodel_file = results_dir.joinpath('My3rdModel.pkl')\nwith open(model_file, 'rb') as f:\n rf = pickle.load(f)\nprint('Model Loaded: --- %s seconds ---' % (time.time() - start_time))\ntarget = np.array(amsmodel1['price'])\nfeatures = amsmodel1.drop('price', axis=1)\nfeature_list = list(features.columns)\nfeatures = np.array(features)\nprint(feature_list)\nprint(features)\nprint('Features Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\ntree = rf.estimators_[5]\ntree_dot_file = results_dir.joinpath('tree.dot')\ntree_png_file = results_dir.joinpath('tree.png')\ndotfile = open(tree_dot_file, 'w')\nexport_graphviz(tree, out_file=dotfile, feature_names=feature_list, rounded\n =True, precision=1)\ngraph, = pydot.graph_from_dot_file(tree_dot_file)\ngraph.write_png(tree_png_file)\nprint('DecisionTree: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\n",
"step-4": "<mask token>\nfrom sklearn.tree import export_graphviz\nimport pydot\nimport pickle\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor\nimport time\nstart_time = time.time()\npd.options.mode.chained_assignment = None\nresults_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')\nresults_dir.mkdir(parents=True, exist_ok=True)\nfilepath = 'C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv'\ndata = pd.read_csv(filepath)\nprint(data.shape)\nshape = data.shape\nprint(\"\"\"\nDataFrame Shape :\"\"\", shape)\nprint(\"\"\"\nNumber of rows :\"\"\", shape[0])\nprint(\"\"\"\nNumber of columns :\"\"\", shape[1])\namsmodel1 = data\ndel amsmodel1['street_address']\namsmodel1.fillna(0, inplace=True)\nprint('Dataframe Loaded: --- %s seconds ---' % (time.time() - start_time))\nmodel_file = results_dir.joinpath('My3rdModel.pkl')\nwith open(model_file, 'rb') as f:\n rf = pickle.load(f)\nprint('Model Loaded: --- %s seconds ---' % (time.time() - start_time))\ntarget = np.array(amsmodel1['price'])\nfeatures = amsmodel1.drop('price', axis=1)\nfeature_list = list(features.columns)\nfeatures = np.array(features)\nprint(feature_list)\nprint(features)\nprint('Features Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\ntree = rf.estimators_[5]\ntree_dot_file = results_dir.joinpath('tree.dot')\ntree_png_file = results_dir.joinpath('tree.png')\ndotfile = open(tree_dot_file, 'w')\nexport_graphviz(tree, out_file=dotfile, feature_names=feature_list, rounded\n =True, precision=1)\ngraph, = pydot.graph_from_dot_file(tree_dot_file)\ngraph.write_png(tree_png_file)\nprint('DecisionTree: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\n",
"step-5": "\"\"\"\nAuthor: Alan Danque\nDate: 20210323\nPurpose:Final Data Wrangling, strips html and punctuation.\n\n\"\"\"\nfrom sklearn.tree import export_graphviz\nimport pydot\nimport pickle\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor\nimport time\nstart_time = time.time()\npd.options.mode.chained_assignment = None # default='warn' # https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas\n\nresults_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')\nresults_dir.mkdir(parents=True, exist_ok=True)\n\nfilepath = \"C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv\"\ndata = pd.read_csv(filepath)\nprint(data.shape)\nshape = data.shape\nprint('\\nDataFrame Shape :', shape)\nprint('\\nNumber of rows :', shape[0])\nprint('\\nNumber of columns :', shape[1])\n\namsmodel1 = data\ndel amsmodel1['street_address']\namsmodel1.fillna(0, inplace=True)\n#amsmodel1.replace(np.nan,0)\n\nprint(\"Dataframe Loaded: --- %s seconds ---\" % (time.time() - start_time))\n\n\n# load model and predict\nmodel_file = results_dir.joinpath('My3rdModel.pkl')\nwith open(model_file, 'rb') as f:\n rf = pickle.load(f)\n#rf.predict(X[0:1])\n\nprint(\"Model Loaded: --- %s seconds ---\" % (time.time() - start_time))\n\n\ntarget = np.array(amsmodel1['price'])\nfeatures = amsmodel1.drop('price', axis = 1)\nfeature_list = list(features.columns)\nfeatures = np.array(features)\nprint(feature_list)\nprint(features)\nprint(\"Features Loaded: --- %s seconds ---\" % (time.time() - start_time))\n\n\"\"\"\n\nfrom sklearn.model_selection import RandomizedSearchCV\n# Number of trees in random forest\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# Number of features to consider at every split\nmax_features = ['auto', 'sqrt']\n# Maximum number of levels in tree\nmax_depth = [int(x) for x in 
np.linspace(10, 110, num = 11)]\nmax_depth.append(None)\n# Minimum number of samples required to split a node\nmin_samples_split = [2, 5, 10]\n# Minimum number of samples required at each leaf node\nmin_samples_leaf = [1, 2, 4]\n# Method of selecting samples for training each tree\nbootstrap = [True, False]\n# Create the random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\nprint(random_grid)\nprint(\"Estimators Loaded: --- %s seconds ---\" % (time.time() - start_time))\n\"\"\"\n\n\n# Decision Tree\n\n## SAVING THE DECISION TREE\ntree = rf.estimators_[5]\ntree_dot_file = results_dir.joinpath('tree.dot')\ntree_png_file = results_dir.joinpath('tree.png')\ndotfile = open(tree_dot_file, 'w')\nexport_graphviz(tree, out_file = dotfile, feature_names = feature_list, rounded = True, precision = 1)\n\n# Install https://graphviz.org/download/#windows\n#(graph, ) = pydot.graph_from_dot_file(tree_dot_file)\n#graph.write_png(tree_png_file)\n# C:\\Program Files\\Graphviz\\bin\n# having issues with pydot.graph_from_dot_file. Since my dot file is getting created using subprocess.\n## from subprocess import check_call\n## check_call(['dot','-Tpng',dotfile,'-o',tree_png_file])\n\n(graph,) = pydot.graph_from_dot_file(tree_dot_file)\ngraph.write_png(tree_png_file)\n\nprint(\"DecisionTree: --- %s seconds ---\" % (time.time() - start_time))\n\"\"\"\nPyDot Conversion Complete: --- 3804.3111951351166 seconds ---\n\"\"\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Package for haasplugin.
"""
|
normal
|
{
"blob_id": "20518302b6a67f8f1ac01f1adf4fe06ab2eaf280",
"index": 3098,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\nPackage for haasplugin.\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
'''
Take list of iam users in a csv file like
S_NO, IAM_User_Name,Programatic_Access,Console_Access,PolicyARN
1,XYZ, Yes,No,arn:aws:iam::aws:policy/AdministratorAccess
2.pqr,Yes,Yes,arn:aws:iam::aws:policy/AdministratorAccess
3.abc,No,Yes,arn:aws:iam::aws:policy/AmazonAPIGatewayInvokeFullAccess
'''
import boto3,sys
from pprint import pprint

# Bulk-create IAM users ixasisiidemo701..ixasisiidemo1099 in the
# dev_root account, retrying the whole batch forever.
# NOTE(review): `each` iterates 701..1099, so the `each == 509` stop
# condition can never fire as written — confirm the intended stop index.
while True:
    session=boto3.session.Session(profile_name="dev_root")
    iam_re=session.resource(service_name="iam")
    for each in range(701,1100):
        try:
            iam_re.create_user(UserName="ixasisiidemo"+str(each))
            if each==509:
                sys.exit()
        # FIX: the original bare `except:` also swallowed the SystemExit
        # raised by sys.exit(), making the exit guard a no-op; catching
        # Exception skips only create_user failures (e.g. user exists).
        except Exception:
            continue
|
normal
|
{
"blob_id": "00afab442f56d364c785324f816b52b4a6be609d",
"index": 3078,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n session = boto3.session.Session(profile_name='dev_root')\n iam_re = session.resource(service_name='iam')\n for each in range(701, 1100):\n try:\n iam_re.create_user(UserName='ixasisiidemo' + str(each))\n if each == 509:\n sys.exit()\n except:\n continue\n",
"step-3": "<mask token>\nimport boto3, sys\nfrom pprint import pprint\nwhile True:\n session = boto3.session.Session(profile_name='dev_root')\n iam_re = session.resource(service_name='iam')\n for each in range(701, 1100):\n try:\n iam_re.create_user(UserName='ixasisiidemo' + str(each))\n if each == 509:\n sys.exit()\n except:\n continue\n",
"step-4": "'''\nTake list of iam users in a csv file like\n\nS_NO, IAM_User_Name,Programatic_Access,Console_Access,PolicyARN\n\n1,XYZ, Yes,No,arn:aws:iam::aws:policy/AdministratorAccess\n\n2.pqr,Yes,Yes,arn:aws:iam::aws:policy/AdministratorAccess\n\n3.abc,No,Yes,arn:aws:iam::aws:policy/AmazonAPIGatewayInvokeFullAccess\n\n'''\n\nimport boto3,sys\nfrom pprint import pprint\nwhile True:\n session=boto3.session.Session(profile_name=\"dev_root\")\n iam_re=session.resource(service_name=\"iam\")\n for each in range(701,1100):\n try:\n iam_re.create_user(UserName=\"ixasisiidemo\"+str(each))\n if each==509:\n sys.exit()\n except:\n continue\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
# print(robjects.__file__)
import sys
sys.path.append('./')
import importlib
import json
import os
from web_app.function.WordCould import word_img
# importlib.reload(sys)
# #sys.setdefaultencoding('gbk')
class Ubiquitination():
    """Driver for the ubiquitination analysis pipeline.

    Resolves input-data paths for a TCGA disease cohort, creates the
    per-disease output directory, and runs the R analysis scripts
    (differential expression, single-factor Cox, survival, correlation
    bubbles) through rpy2.
    """

    def __init__(self, disease, path):
        """disease: TCGA cohort name (e.g. 'TCGA-LIHC'); path: output root."""
        self.disease = disease
        self.num = 11
        self.datas_path = self.data_path(self.disease)
        self.save = self.save_path(path, self.disease)

    def load_R(self):
        # Placeholder; R scripts are sourced lazily inside fig1/fig2.
        pass

    def data_path(self, name):
        """Return (expression, clinical, ubiquitination-gene) .txt paths for *name*."""
        exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)
        clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)
        ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'
        return (exp_path, clinical_path, ubiquitina_path)

    def save_path(self, path, disease):
        """Create (if missing) and return the output dir <path>/Ubiquitination/<disease>/."""
        sp = path + '/Ubiquitination/'
        if not os.path.exists(sp):
            os.makedirs(sp)
        sp = sp + disease + '/'
        if not os.path.exists(sp):
            os.makedirs(sp)
        return sp

    def analysis(self, data, name, save_path):
        """Resolve and return the input-file paths consumed by fig1.

        Returns (limma_all, limma_0.05, ubiq_gene_list, pheno, exp_data,
        clinical, save_path).  *data* is currently unused and is kept only
        for interface compatibility with existing callers.
        """
        lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name)
        lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)
        ubiq = './web_app/data/data/ubiq/UbiqGene.txt'
        pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)
        exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)
        cli = './web_app/data/disease/clinical/{}.csv'.format(name)
        # FIX: removed an unreachable self.fig1(...) call that followed
        # this return and referenced an undefined name `usa`.
        return (lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path)

    def fig1(self, lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path):
        """Run the Fig1 R pipeline and return a dict of result-file lists.

        Steps: differential expression (volcano plot, heatmap),
        single-factor Cox regression, survival curves, correlation
        bubble plot, and a word-cloud image of the significant genes.
        """
        save_path = save_path + 'Fig1/'
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        r = robjects.r
        # Source the R analysis scripts.
        r.source('./web_app/script/Conversion_Difference.r')
        r.source('./web_app/script/Single.r')
        r.source('./web_app/script/Survival_.r')
        r.source('./web_app/script/RelatedBubbles.r')
        # Differential-expression analysis: DEG table, volcano plot, heatmap.
        difference = r.Difference(lime_all, lime_n, ubiq, pheno, exp_data, save_path)
        # Single-factor Cox regression on the differential genes.
        single = r.SingleFactor(cli, exp_data, difference[0], save_path)
        # Survival analysis of the significant genes.
        survival = r.Survival_(single[0], single[1], difference[0], pheno, cli, save_path)
        # Correlation bubble plot.
        bubble = r.RelatedBubbles(survival[0], cli, save_path)
        # Word-cloud image of the significant genes.
        word_img(single[1], save_path)
        result = {
            'code': 1,
            'difference': [i for i in difference],
            'single': [i for i in single],
            'survival': [i for i in survival],
            'bubble': [i for i in bubble],
        }
        return result

    def fig2(self, save_path):
        """Source the Fig2 heatmap R script.

        NOTE(review): *save_path* is currently unused and no R function is
        invoked yet — this step only sources the script.
        """
        r = robjects.r
        r.source('./web_app/script/GeneSurvivalModel/Heatmap.r')
        result = {
            'code': 2,
        }
        return result

    def epath(self):
        """Return the output directory resolved for this disease."""
        return self.save

    def progress(self):
        """Return the pipeline progress indicator (currently constant 1)."""
        return 1
# Manual smoke test: run the Fig1 pipeline on the TCGA-LIHC cohort.
if __name__ == "__main__":
    pipeline = Ubiquitination('TCGA-LIHC', './web_app/temp/Arsz')
    inputs = pipeline.analysis(pipeline.datas_path, pipeline.disease, pipeline.save)
    fig1_result = pipeline.fig1(*inputs)
|
normal
|
{
"blob_id": "a6ae4324580a8471969e0229c02ea1670728f25b",
"index": 3767,
"step-1": "<mask token>\n\n\nclass Ubiquitination:\n <mask token>\n\n def load_R(self):\n pass\n\n def data_path(self, name):\n exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'\n return exp_path, clinical_path, ubiquitina_path\n\n def save_path(self, path, disease):\n path = path\n disease = disease\n sp = path + '/Ubiquitination/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n sp = sp + disease + '/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n return sp\n\n def analysis(self, data, name, save_path):\n data_path = data\n name = name\n save_path = save_path\n lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name\n )\n lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq = './web_app/data/data/ubiq/UbiqGene.txt'\n pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)\n cli = './web_app/data/disease/clinical/{}.csv'.format(name)\n return lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path\n fig1_result = self.fig1(lime_all, lime_n, ubiq, pheno, exp_data,\n cli, usa, save_path)\n <mask token>\n <mask token>\n\n def epath(self):\n return self.save\n\n def progress(self):\n return 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ubiquitination:\n <mask token>\n\n def load_R(self):\n pass\n\n def data_path(self, name):\n exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'\n return exp_path, clinical_path, ubiquitina_path\n\n def save_path(self, path, disease):\n path = path\n disease = disease\n sp = path + '/Ubiquitination/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n sp = sp + disease + '/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n return sp\n\n def analysis(self, data, name, save_path):\n data_path = data\n name = name\n save_path = save_path\n lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name\n )\n lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq = './web_app/data/data/ubiq/UbiqGene.txt'\n pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)\n cli = './web_app/data/disease/clinical/{}.csv'.format(name)\n return lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path\n fig1_result = self.fig1(lime_all, lime_n, ubiq, pheno, exp_data,\n cli, usa, save_path)\n\n def fig1(self, lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path):\n lime_all = lime_all\n lime_n = lime_n\n ubiq = ubiq\n pheno = pheno\n exp_data = exp_data\n cli = cli\n save_path = save_path + 'Fig1/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n r = robjects.r\n r.source('./web_app/script/Conversion_Difference.r')\n r.source('./web_app/script/Single.r')\n r.source('./web_app/script/Survival_.r')\n r.source('./web_app/script/RelatedBubbles.r')\n difference = r.Difference(lime_all, lime_n, ubiq, pheno, exp_data,\n save_path)\n single = r.SingleFactor(cli, exp_data, difference[0], save_path)\n survival = r.Survival_(single[0], single[1], difference[0], pheno,\n cli, save_path)\n bubble = 
r.RelatedBubbles(survival[0], cli, save_path)\n word_img(single[1], save_path)\n result = {'code': 1, 'difference': [i for i in difference],\n 'single': [i for i in single], 'survival': [i for i in survival\n ], 'bubble': [i for i in bubble]}\n return result\n <mask token>\n\n def epath(self):\n return self.save\n\n def progress(self):\n return 1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ubiquitination:\n\n def __init__(self, disease, path):\n self.disease = disease\n path = path\n self.num = 11\n self.datas_path = self.data_path(self.disease)\n self.save = self.save_path(path, self.disease)\n\n def load_R(self):\n pass\n\n def data_path(self, name):\n exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'\n return exp_path, clinical_path, ubiquitina_path\n\n def save_path(self, path, disease):\n path = path\n disease = disease\n sp = path + '/Ubiquitination/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n sp = sp + disease + '/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n return sp\n\n def analysis(self, data, name, save_path):\n data_path = data\n name = name\n save_path = save_path\n lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name\n )\n lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq = './web_app/data/data/ubiq/UbiqGene.txt'\n pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)\n cli = './web_app/data/disease/clinical/{}.csv'.format(name)\n return lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path\n fig1_result = self.fig1(lime_all, lime_n, ubiq, pheno, exp_data,\n cli, usa, save_path)\n\n def fig1(self, lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path):\n lime_all = lime_all\n lime_n = lime_n\n ubiq = ubiq\n pheno = pheno\n exp_data = exp_data\n cli = cli\n save_path = save_path + 'Fig1/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n r = robjects.r\n r.source('./web_app/script/Conversion_Difference.r')\n r.source('./web_app/script/Single.r')\n r.source('./web_app/script/Survival_.r')\n r.source('./web_app/script/RelatedBubbles.r')\n difference = r.Difference(lime_all, lime_n, ubiq, pheno, exp_data,\n 
save_path)\n single = r.SingleFactor(cli, exp_data, difference[0], save_path)\n survival = r.Survival_(single[0], single[1], difference[0], pheno,\n cli, save_path)\n bubble = r.RelatedBubbles(survival[0], cli, save_path)\n word_img(single[1], save_path)\n result = {'code': 1, 'difference': [i for i in difference],\n 'single': [i for i in single], 'survival': [i for i in survival\n ], 'bubble': [i for i in bubble]}\n return result\n\n def fig2(self, save_path):\n r = robjects.r\n r.source('./web_app/script/GeneSurvivalModel/Heatmap.r')\n result = {'code': 2}\n return result\n\n def epath(self):\n return self.save\n\n def progress(self):\n return 1\n\n\n<mask token>\n",
"step-4": "import rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\nimport sys\nsys.path.append('./')\nimport importlib\nimport json\nimport os\nfrom web_app.function.WordCould import word_img\n\n\nclass Ubiquitination:\n\n def __init__(self, disease, path):\n self.disease = disease\n path = path\n self.num = 11\n self.datas_path = self.data_path(self.disease)\n self.save = self.save_path(path, self.disease)\n\n def load_R(self):\n pass\n\n def data_path(self, name):\n exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'\n return exp_path, clinical_path, ubiquitina_path\n\n def save_path(self, path, disease):\n path = path\n disease = disease\n sp = path + '/Ubiquitination/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n sp = sp + disease + '/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n return sp\n\n def analysis(self, data, name, save_path):\n data_path = data\n name = name\n save_path = save_path\n lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name\n )\n lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq = './web_app/data/data/ubiq/UbiqGene.txt'\n pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)\n cli = './web_app/data/disease/clinical/{}.csv'.format(name)\n return lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path\n fig1_result = self.fig1(lime_all, lime_n, ubiq, pheno, exp_data,\n cli, usa, save_path)\n\n def fig1(self, lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path):\n lime_all = lime_all\n lime_n = lime_n\n ubiq = ubiq\n pheno = pheno\n exp_data = exp_data\n cli = cli\n save_path = save_path + 'Fig1/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n r = robjects.r\n r.source('./web_app/script/Conversion_Difference.r')\n 
r.source('./web_app/script/Single.r')\n r.source('./web_app/script/Survival_.r')\n r.source('./web_app/script/RelatedBubbles.r')\n difference = r.Difference(lime_all, lime_n, ubiq, pheno, exp_data,\n save_path)\n single = r.SingleFactor(cli, exp_data, difference[0], save_path)\n survival = r.Survival_(single[0], single[1], difference[0], pheno,\n cli, save_path)\n bubble = r.RelatedBubbles(survival[0], cli, save_path)\n word_img(single[1], save_path)\n result = {'code': 1, 'difference': [i for i in difference],\n 'single': [i for i in single], 'survival': [i for i in survival\n ], 'bubble': [i for i in bubble]}\n return result\n\n def fig2(self, save_path):\n r = robjects.r\n r.source('./web_app/script/GeneSurvivalModel/Heatmap.r')\n result = {'code': 2}\n return result\n\n def epath(self):\n return self.save\n\n def progress(self):\n return 1\n\n\nif __name__ == '__main__':\n a = Ubiquitination('TCGA-LIHC', './web_app/temp/Arsz')\n x = a.analysis(a.datas_path, a.disease, a.save)\n f1 = a.fig1(x[0], x[1], x[2], x[3], x[4], x[5], x[6])\n",
"step-5": "import rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\n# print(robjects.__file__)\nimport sys\nsys.path.append('./')\nimport importlib\nimport json\nimport os\nfrom web_app.function.WordCould import word_img\n# importlib.reload(sys)\n# #sys.setdefaultencoding('gbk')\n\n\nclass Ubiquitination():\n\n def __init__(self,disease,path):\n\n self.disease=disease\n path=path\n self.num=11\n # print('泛素化',self.disease,path)\n self.datas_path=self.data_path(self.disease)\n self.save=self.save_path(path,self.disease)\n\n # self.analysis(self.datas_path,self.disease,self.save)\n\n def load_R(self):\n pass\n\n def data_path(self,name):\n\n exp_path='./web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path='./web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path='./web_app/data/data/ubiq/UbiqGene.txt'\n # print(exp_path)\n return (exp_path,clinical_path,ubiquitina_path)\n\n def save_path(self,path,disease):\n path=path\n disease=disease\n\n sp=path+'/Ubiquitination/'\n\n if not os.path.exists(sp):\n os.makedirs(sp)\n \n sp=sp+disease+'/'\n \n if not os.path.exists(sp):\n os.makedirs(sp)\n \n # print(sp)\n return sp\n\n def analysis(self,data,name,save_path):\n \n data_path=data\n name=name\n save_path=save_path\n # print(data_path[0],'TCGA-BRCA',save_path)\n\n lime_all='./web_app/data/Difference/{}/limma_DEG_all.csv'.format(name)\n lime_n='./web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq='./web_app/data/data/ubiq/UbiqGene.txt'\n pheno='./web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data='./web_app/data/disease/exp_data/{}.csv'.format(name)\n cli='./web_app/data/disease/clinical/{}.csv'.format(name)\n\n return (lime_all,lime_n,ubiq,pheno,exp_data,cli,save_path)\n\n fig1_result=self.fig1(lime_all,lime_n,ubiq,pheno,exp_data,cli,usa,save_path)\n\n \n \n\n\n # print(multiple[0])\n # print(single[0],single[1])\n\n def fig1(self,lime_all,lime_n,ubiq,pheno,exp_data,cli,save_path):\n\n 
lime_all=lime_all\n lime_n=lime_n\n ubiq=ubiq\n pheno=pheno\n exp_data=exp_data\n cli=cli\n save_path=save_path+'Fig1/'\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n r=robjects.r\n # 加载差异分析文件\n r.source('./web_app/script/Conversion_Difference.r')\n r.source('./web_app/script/Single.r')\n r.source('./web_app/script/Survival_.r')\n r.source('./web_app/script/RelatedBubbles.r')\n # 调用差异分析函数完成差异分析\n # 构建差异基因,绘制差异火山图,热图\n difference=r.Difference(lime_all,lime_n,ubiq,pheno,exp_data,save_path)\n # print(difference[0],difference[1])\n\n # 单多因素分析\n single=r.SingleFactor(cli,exp_data,difference[0],save_path)\n # # print([i for i in single])\n \n\n survival=r.Survival_(single[0],single[1],difference[0],pheno,cli,save_path)\n # survival=r.Survival_(single[0],single[1],difference[0],pheno,save_path)\n # # # print([i for i in survival])\n # # # # 相关性气泡图\n \n bubble=r.RelatedBubbles(survival[0],cli,save_path)\n\n word_img(single[1],save_path)\n # print([i for i in bubble])\n\n result={\n 'code':1,\n 'difference':[i for i in difference],\n 'single':[i for i in single],\n 'survival':[i for i in survival],\n 'bubble':[i for i in bubble],\n }\n return result\n\n def fig2(self,save_path):\n # save_path=save_path+'Fig2/'\n\n # if not os.path.exists(save_path):\n # os.makedirs(save_path)\n\n r=robjects.r\n # 加载差异分析文件\n r.source('./web_app/script/GeneSurvivalModel/Heatmap.r')\n\n result={\n 'code':2,\n \n }\n return result\n # 条带热图\n # r.source('./web_app/script/Heatmap.r')\n # r.Heatmap('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',\n # \"./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/Cox_genes_OS_pValue.csv\",\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')\n\n # Lasso折线图\n # r.source('./web_app/script/LineLasso.r')\n # r.LineLasso(\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/CoxSingle_train.csv',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/'\n # )\n # # 发散曲线\n # 
r.source('./web_app/script/CurveLasso.r')\n # r.CurveLasso('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/signature_gene.txt',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')\n\n # # 随机生存森林\n # r.source('./web_app/script/RandomSurvivalForest.r')\n # r.Rsf('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/signature_gene.txt',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')\n\n def epath(self):\n return self.save\n def progress(self):\n\n return 1\nif __name__ == \"__main__\":\n a=Ubiquitination('TCGA-LIHC','./web_app/temp/Arsz')\n x=a.analysis(a.datas_path,a.disease,a.save)\n f1=a.fig1(x[0],x[1],x[2],x[3],x[4],x[5],x[6])\n",
"step-ids": [
7,
8,
10,
12,
13
]
}
|
[
7,
8,
10,
12,
13
] |
import numpy as np
import pandas as pd
import logging
import matplotlib.pyplot as plt
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler, RobustScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.linear_model import LinearRegression
import datetime
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# ignore warnings
import warnings
warnings.filterwarnings(action="ignore")
# Load the raw inputs: daily sales records (train), store metadata, and
# the unlabeled test set.
# NOTE(review): paths assume the CSVs live under ./Scripts/pages/ — confirm.
df_train = pd.read_csv('./Scripts/pages/train.csv')
df_store = pd.read_csv('./Scripts/pages/store.csv')
df_test = pd.read_csv('./Scripts/pages/test.csv')
# Attach store metadata to every train/test row via an inner join on the
# shared 'Store' id column.
merged_train = pd.merge(left = df_train, right = df_store, how = 'inner', left_on = 'Store', right_on = 'Store')
merged_test = pd.merge(left = df_test, right = df_store, how = 'inner', left_on = 'Store', right_on = 'Store')
def _add_date_features(df):
    """Expand the 'Date' column of *df* into calendar parts.

    Adds Month/Year/Day/WeekOfYear/DayOfWeek plus a binary 'weekday' flag
    (0 on Saturday/Sunday, 1 otherwise), then drops the 'Store' id column.
    Returns the transformed frame.
    """
    df['Date'] = pd.to_datetime(df.Date)
    df['Month'] = df.Date.dt.month.to_list()
    df['Year'] = df.Date.dt.year.to_list()
    df['Day'] = df.Date.dt.day.to_list()
    # NOTE(review): Series.dt.weekofyear is removed in pandas >= 2.0;
    # migrate to df.Date.dt.isocalendar().week when upgrading pandas.
    df['WeekOfYear'] = df.Date.dt.weekofyear.to_list()
    df['DayOfWeek'] = df.Date.dt.dayofweek.to_list()
    df['weekday'] = 1  # default: working day
    df.loc[df['DayOfWeek'] == 5, 'weekday'] = 0  # Saturday
    df.loc[df['DayOfWeek'] == 6, 'weekday'] = 0  # Sunday
    return df.drop(['Store'], axis=1)


def preprocess_data(train, test):
    """Build the combined, model-ready feature matrix.

    Splits *train* into features/target, date-expands both sets, imputes
    missing values, one-hot encodes the categoricals and robust-scales the
    numeric columns.  Also publishes train_features / test_features /
    train_target / categorical / numerical as module globals, which
    reconstruct_sets() relies on.

    Returns the concatenated frame (train rows first, then test rows).
    """
    global train_features, test_features, train_target, categorical, numerical

    # Target is Sales; Customers is dropped because it is unknown at
    # prediction time, and Id is only needed for submission files.
    train_features = train.drop(['Sales', 'Customers'], axis=1)
    train_target = train[['Sales']]
    test_features = test.drop(['Id'], axis=1)

    # Feature generation (best effort: skipped with a message when a
    # required column is absent, matching the original behaviour).
    try:
        train_features = _add_date_features(train_features)
        test_features = _add_date_features(test_features)
    except KeyError:
        print("Column couldn't be found")

    # Classify the train columns by dtype.
    categorical = []
    numerical = []
    timestamp = []
    for col in train_features.columns:
        if train_features[col].dtype == object:
            categorical.append(col)
        elif train_features[col].dtype in ['int16', 'int32', 'int64',
                                           'float16', 'float32', 'float64']:
            numerical.append(col)
        else:
            timestamp.append(col)

    # Keep the selected columns and merge train+test so that imputation,
    # encoding and scaling are applied uniformly to both sets.
    my_cols = categorical + numerical + timestamp
    train_features = train_features[my_cols].copy()
    test_features = test_features[my_cols].copy()
    features = pd.concat([train_features, test_features])

    # Nullable Int64 so these columns can hold NaN until imputation.
    features.CompetitionOpenSinceMonth = features.CompetitionOpenSinceMonth.astype('Int64')
    features.CompetitionOpenSinceYear = features.CompetitionOpenSinceYear.astype('Int64')
    features.Promo2SinceWeek = features.Promo2SinceWeek.astype('Int64')
    features.Promo2SinceYear = features.Promo2SinceYear.astype('Int64')
    # Bug fix: the original chained indexing
    # (features["StateHoliday"].loc[...] = "0") assigns through a temporary
    # Series and may silently fail to update the frame (the warning was
    # suppressed by filterwarnings).  Frame-level .loc is reliable.
    features.loc[features['StateHoliday'] == 0, 'StateHoliday'] = '0'

    # Impute: column mean for the numeric gap columns, mode elsewhere.
    for col in ['CompetitionDistance', 'CompetitionOpenSinceMonth',
                'CompetitionOpenSinceYear', 'Promo2SinceWeek', 'Promo2SinceYear']:
        features[col] = features[col].fillna(int(features[col].mean()))
    features.PromoInterval = features.PromoInterval.fillna(features.PromoInterval.mode()[0])
    features.Open = features.Open.fillna(features.Open.mode()[0])

    # One-hot encode the categoricals, then robust-scale the numerics.
    features = pd.get_dummies(features, columns=['StoreType', 'Assortment',
                                                 'PromoInterval', 'StateHoliday'])
    scaler = RobustScaler()
    features[numerical] = scaler.fit_transform(features[numerical].values)
    return features
# Build the combined feature matrix, then drop the raw Date column
# (its calendar parts were already extracted during preprocessing).
features = preprocess_data(merged_train, merged_test)
features = features.drop(['Date'], axis = 1)
# reconstruct train and test sets
def reconstruct_sets(features):
    """Split the combined frame back into train/validation/test sets.

    The first len(train_features) rows of *features* are the original
    training rows and the remainder are the test rows; the training rows
    are then split 80/20 into train and validation partitions with a
    fixed seed for reproducibility.
    """
    global x_train, x_val, y_train, y_val
    n_train = len(train_features)
    x_train = features.iloc[:n_train, :]
    x_test = features.iloc[n_train:, :]
    y_train = train_target
    x_train, x_val, y_train, y_val = train_test_split(
        x_train, y_train, test_size=0.20, random_state=0)
    return x_train, x_val, y_train, y_val, x_test
# Fit a random-forest regressor on the training split and report the
# usual regression metrics on the held-out validation split.
x_train, x_val, y_train, y_val, x_test = reconstruct_sets(features)

clf = RandomForestRegressor(n_estimators=14)
clf.fit(x_train, y_train)

y_pred = clf.predict(x_val)
print(f"MSE = {mean_squared_error(y_val, y_pred)}")
print(f"Mean R2 score = {r2_score(y_val, y_pred)}")
print(f"MAE = {mean_absolute_error(y_val, y_pred)}")
|
normal
|
{
"blob_id": "dc51ca86a49dbec6f714753782494f21d4b1591d",
"index": 9091,
"step-1": "<mask token>\n\n\ndef preprocess_data(train, test):\n global train_features, test_features, train_target, categorical, numerical\n train_features = train.drop(['Sales', 'Customers'], axis=1)\n train_target = train[['Sales']]\n test_features = test.drop(['Id'], axis=1)\n try:\n train_features['Date'] = pd.to_datetime(train_features.Date)\n train_features['Month'] = train_features.Date.dt.month.to_list()\n train_features['Year'] = train_features.Date.dt.year.to_list()\n train_features['Day'] = train_features.Date.dt.day.to_list()\n train_features['WeekOfYear'\n ] = train_features.Date.dt.weekofyear.to_list()\n train_features['DayOfWeek'] = train_features.Date.dt.dayofweek.to_list(\n )\n train_features['weekday'] = 1\n train_features.loc[train_features['DayOfWeek'] == 5, 'weekday'] = 0\n train_features.loc[train_features['DayOfWeek'] == 6, 'weekday'] = 0\n train_features = train_features.drop(['Store'], axis=1)\n test_features['Date'] = pd.to_datetime(test_features.Date)\n test_features['Month'] = test_features.Date.dt.month.to_list()\n test_features['Year'] = test_features.Date.dt.year.to_list()\n test_features['Day'] = test_features.Date.dt.day.to_list()\n test_features['WeekOfYear'] = test_features.Date.dt.weekofyear.to_list(\n )\n test_features['DayOfWeek'] = test_features.Date.dt.dayofweek.to_list()\n test_features['weekday'] = 1\n test_features.loc[test_features['DayOfWeek'] == 5, 'weekday'] = 0\n test_features.loc[test_features['DayOfWeek'] == 6, 'weekday'] = 0\n test_features = test_features.drop(['Store'], axis=1)\n except KeyError:\n print(\"Column couldn't be found\")\n categorical = []\n numerical = []\n timestamp = []\n for col in train_features.columns:\n if train_features[col].dtype == object:\n categorical.append(col)\n elif train_features[col].dtype in ['int16', 'int32', 'int64',\n 'float16', 'float32', 'float64']:\n numerical.append(col)\n else:\n timestamp.append(col)\n my_cols = categorical + numerical + timestamp\n train_features = 
train_features[my_cols].copy()\n test_features = test_features[my_cols].copy()\n features = pd.concat([train_features, test_features])\n features.CompetitionOpenSinceMonth = (features.\n CompetitionOpenSinceMonth.astype('Int64'))\n features.CompetitionOpenSinceYear = (features.CompetitionOpenSinceYear.\n astype('Int64'))\n features.Promo2SinceWeek = features.Promo2SinceWeek.astype('Int64')\n features.Promo2SinceYear = features.Promo2SinceYear.astype('Int64')\n features['StateHoliday'].loc[features['StateHoliday'] == 0] = '0'\n for col in ['CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2SinceWeek', 'Promo2SinceYear']:\n features[col] = features[col].fillna(int(features[col].mean()))\n features.PromoInterval = features.PromoInterval.fillna(features.\n PromoInterval.mode()[0])\n features.Open = features.Open.fillna(features.Open.mode()[0])\n features = pd.get_dummies(features, columns=['StoreType', 'Assortment',\n 'PromoInterval', 'StateHoliday'])\n scaler = RobustScaler()\n c = ['DayOfWeek', 'Open', 'Promo', 'SchoolHoliday',\n 'CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',\n 'Promo2SinceYear', 'WeekOfYear', 'Month', 'Year', 'Day',\n 'WeekOfYear', 'weekday']\n features[numerical] = scaler.fit_transform(features[numerical].values)\n return features\n\n\n<mask token>\n\n\ndef reconstruct_sets(features):\n global x_train, x_val, y_train, y_val\n x_train = features.iloc[:len(train_features), :]\n x_test = features.iloc[len(train_features):, :]\n y_train = train_target\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,\n test_size=0.2, random_state=0)\n return x_train, x_val, y_train, y_val, x_test\n\n\n<mask token>\n",
"step-2": "<mask token>\nwarnings.filterwarnings(action='ignore')\n<mask token>\n\n\ndef preprocess_data(train, test):\n global train_features, test_features, train_target, categorical, numerical\n train_features = train.drop(['Sales', 'Customers'], axis=1)\n train_target = train[['Sales']]\n test_features = test.drop(['Id'], axis=1)\n try:\n train_features['Date'] = pd.to_datetime(train_features.Date)\n train_features['Month'] = train_features.Date.dt.month.to_list()\n train_features['Year'] = train_features.Date.dt.year.to_list()\n train_features['Day'] = train_features.Date.dt.day.to_list()\n train_features['WeekOfYear'\n ] = train_features.Date.dt.weekofyear.to_list()\n train_features['DayOfWeek'] = train_features.Date.dt.dayofweek.to_list(\n )\n train_features['weekday'] = 1\n train_features.loc[train_features['DayOfWeek'] == 5, 'weekday'] = 0\n train_features.loc[train_features['DayOfWeek'] == 6, 'weekday'] = 0\n train_features = train_features.drop(['Store'], axis=1)\n test_features['Date'] = pd.to_datetime(test_features.Date)\n test_features['Month'] = test_features.Date.dt.month.to_list()\n test_features['Year'] = test_features.Date.dt.year.to_list()\n test_features['Day'] = test_features.Date.dt.day.to_list()\n test_features['WeekOfYear'] = test_features.Date.dt.weekofyear.to_list(\n )\n test_features['DayOfWeek'] = test_features.Date.dt.dayofweek.to_list()\n test_features['weekday'] = 1\n test_features.loc[test_features['DayOfWeek'] == 5, 'weekday'] = 0\n test_features.loc[test_features['DayOfWeek'] == 6, 'weekday'] = 0\n test_features = test_features.drop(['Store'], axis=1)\n except KeyError:\n print(\"Column couldn't be found\")\n categorical = []\n numerical = []\n timestamp = []\n for col in train_features.columns:\n if train_features[col].dtype == object:\n categorical.append(col)\n elif train_features[col].dtype in ['int16', 'int32', 'int64',\n 'float16', 'float32', 'float64']:\n numerical.append(col)\n else:\n timestamp.append(col)\n my_cols = 
categorical + numerical + timestamp\n train_features = train_features[my_cols].copy()\n test_features = test_features[my_cols].copy()\n features = pd.concat([train_features, test_features])\n features.CompetitionOpenSinceMonth = (features.\n CompetitionOpenSinceMonth.astype('Int64'))\n features.CompetitionOpenSinceYear = (features.CompetitionOpenSinceYear.\n astype('Int64'))\n features.Promo2SinceWeek = features.Promo2SinceWeek.astype('Int64')\n features.Promo2SinceYear = features.Promo2SinceYear.astype('Int64')\n features['StateHoliday'].loc[features['StateHoliday'] == 0] = '0'\n for col in ['CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2SinceWeek', 'Promo2SinceYear']:\n features[col] = features[col].fillna(int(features[col].mean()))\n features.PromoInterval = features.PromoInterval.fillna(features.\n PromoInterval.mode()[0])\n features.Open = features.Open.fillna(features.Open.mode()[0])\n features = pd.get_dummies(features, columns=['StoreType', 'Assortment',\n 'PromoInterval', 'StateHoliday'])\n scaler = RobustScaler()\n c = ['DayOfWeek', 'Open', 'Promo', 'SchoolHoliday',\n 'CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',\n 'Promo2SinceYear', 'WeekOfYear', 'Month', 'Year', 'Day',\n 'WeekOfYear', 'weekday']\n features[numerical] = scaler.fit_transform(features[numerical].values)\n return features\n\n\n<mask token>\n\n\ndef reconstruct_sets(features):\n global x_train, x_val, y_train, y_val\n x_train = features.iloc[:len(train_features), :]\n x_test = features.iloc[len(train_features):, :]\n y_train = train_target\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,\n test_size=0.2, random_state=0)\n return x_train, x_val, y_train, y_val, x_test\n\n\n<mask token>\nclf.fit(x_train, y_train)\n<mask token>\nprint('MSE =', mean_squared_error(y_val, y_pred))\nprint('Mean R2 score =', r2_score(y_val, y_pred))\nprint('MAE =', 
mean_absolute_error(y_val, y_pred))\n",
"step-3": "<mask token>\nwarnings.filterwarnings(action='ignore')\ndf_train = pd.read_csv('./Scripts/pages/train.csv')\ndf_store = pd.read_csv('./Scripts/pages/store.csv')\ndf_test = pd.read_csv('./Scripts/pages/test.csv')\nmerged_train = pd.merge(left=df_train, right=df_store, how='inner', left_on\n ='Store', right_on='Store')\nmerged_test = pd.merge(left=df_test, right=df_store, how='inner', left_on=\n 'Store', right_on='Store')\n\n\ndef preprocess_data(train, test):\n global train_features, test_features, train_target, categorical, numerical\n train_features = train.drop(['Sales', 'Customers'], axis=1)\n train_target = train[['Sales']]\n test_features = test.drop(['Id'], axis=1)\n try:\n train_features['Date'] = pd.to_datetime(train_features.Date)\n train_features['Month'] = train_features.Date.dt.month.to_list()\n train_features['Year'] = train_features.Date.dt.year.to_list()\n train_features['Day'] = train_features.Date.dt.day.to_list()\n train_features['WeekOfYear'\n ] = train_features.Date.dt.weekofyear.to_list()\n train_features['DayOfWeek'] = train_features.Date.dt.dayofweek.to_list(\n )\n train_features['weekday'] = 1\n train_features.loc[train_features['DayOfWeek'] == 5, 'weekday'] = 0\n train_features.loc[train_features['DayOfWeek'] == 6, 'weekday'] = 0\n train_features = train_features.drop(['Store'], axis=1)\n test_features['Date'] = pd.to_datetime(test_features.Date)\n test_features['Month'] = test_features.Date.dt.month.to_list()\n test_features['Year'] = test_features.Date.dt.year.to_list()\n test_features['Day'] = test_features.Date.dt.day.to_list()\n test_features['WeekOfYear'] = test_features.Date.dt.weekofyear.to_list(\n )\n test_features['DayOfWeek'] = test_features.Date.dt.dayofweek.to_list()\n test_features['weekday'] = 1\n test_features.loc[test_features['DayOfWeek'] == 5, 'weekday'] = 0\n test_features.loc[test_features['DayOfWeek'] == 6, 'weekday'] = 0\n test_features = test_features.drop(['Store'], axis=1)\n except KeyError:\n 
print(\"Column couldn't be found\")\n categorical = []\n numerical = []\n timestamp = []\n for col in train_features.columns:\n if train_features[col].dtype == object:\n categorical.append(col)\n elif train_features[col].dtype in ['int16', 'int32', 'int64',\n 'float16', 'float32', 'float64']:\n numerical.append(col)\n else:\n timestamp.append(col)\n my_cols = categorical + numerical + timestamp\n train_features = train_features[my_cols].copy()\n test_features = test_features[my_cols].copy()\n features = pd.concat([train_features, test_features])\n features.CompetitionOpenSinceMonth = (features.\n CompetitionOpenSinceMonth.astype('Int64'))\n features.CompetitionOpenSinceYear = (features.CompetitionOpenSinceYear.\n astype('Int64'))\n features.Promo2SinceWeek = features.Promo2SinceWeek.astype('Int64')\n features.Promo2SinceYear = features.Promo2SinceYear.astype('Int64')\n features['StateHoliday'].loc[features['StateHoliday'] == 0] = '0'\n for col in ['CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2SinceWeek', 'Promo2SinceYear']:\n features[col] = features[col].fillna(int(features[col].mean()))\n features.PromoInterval = features.PromoInterval.fillna(features.\n PromoInterval.mode()[0])\n features.Open = features.Open.fillna(features.Open.mode()[0])\n features = pd.get_dummies(features, columns=['StoreType', 'Assortment',\n 'PromoInterval', 'StateHoliday'])\n scaler = RobustScaler()\n c = ['DayOfWeek', 'Open', 'Promo', 'SchoolHoliday',\n 'CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',\n 'Promo2SinceYear', 'WeekOfYear', 'Month', 'Year', 'Day',\n 'WeekOfYear', 'weekday']\n features[numerical] = scaler.fit_transform(features[numerical].values)\n return features\n\n\nfeatures = preprocess_data(merged_train, merged_test)\nfeatures = features.drop(['Date'], axis=1)\n\n\ndef reconstruct_sets(features):\n global x_train, x_val, y_train, y_val\n x_train = 
features.iloc[:len(train_features), :]\n x_test = features.iloc[len(train_features):, :]\n y_train = train_target\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,\n test_size=0.2, random_state=0)\n return x_train, x_val, y_train, y_val, x_test\n\n\nx_train, x_val, y_train, y_val, x_test = reconstruct_sets(features)\nclf = RandomForestRegressor(n_estimators=14)\nclf.fit(x_train, y_train)\ny_pred = clf.predict(x_val)\nprint('MSE =', mean_squared_error(y_val, y_pred))\nprint('Mean R2 score =', r2_score(y_val, y_pred))\nprint('MAE =', mean_absolute_error(y_val, y_pred))\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport logging\nimport matplotlib.pyplot as plt\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler, RobustScaler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.linear_model import LinearRegression\nimport datetime\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.model_selection import KFold, cross_val_score, train_test_split\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nimport warnings\nwarnings.filterwarnings(action='ignore')\ndf_train = pd.read_csv('./Scripts/pages/train.csv')\ndf_store = pd.read_csv('./Scripts/pages/store.csv')\ndf_test = pd.read_csv('./Scripts/pages/test.csv')\nmerged_train = pd.merge(left=df_train, right=df_store, how='inner', left_on\n ='Store', right_on='Store')\nmerged_test = pd.merge(left=df_test, right=df_store, how='inner', left_on=\n 'Store', right_on='Store')\n\n\ndef preprocess_data(train, test):\n global train_features, test_features, train_target, categorical, numerical\n train_features = train.drop(['Sales', 'Customers'], axis=1)\n train_target = train[['Sales']]\n test_features = test.drop(['Id'], axis=1)\n try:\n train_features['Date'] = pd.to_datetime(train_features.Date)\n train_features['Month'] = train_features.Date.dt.month.to_list()\n train_features['Year'] = train_features.Date.dt.year.to_list()\n train_features['Day'] = train_features.Date.dt.day.to_list()\n train_features['WeekOfYear'\n ] = train_features.Date.dt.weekofyear.to_list()\n train_features['DayOfWeek'] = train_features.Date.dt.dayofweek.to_list(\n )\n train_features['weekday'] = 1\n train_features.loc[train_features['DayOfWeek'] == 5, 'weekday'] = 0\n train_features.loc[train_features['DayOfWeek'] == 6, 'weekday'] = 0\n train_features = train_features.drop(['Store'], axis=1)\n 
test_features['Date'] = pd.to_datetime(test_features.Date)\n test_features['Month'] = test_features.Date.dt.month.to_list()\n test_features['Year'] = test_features.Date.dt.year.to_list()\n test_features['Day'] = test_features.Date.dt.day.to_list()\n test_features['WeekOfYear'] = test_features.Date.dt.weekofyear.to_list(\n )\n test_features['DayOfWeek'] = test_features.Date.dt.dayofweek.to_list()\n test_features['weekday'] = 1\n test_features.loc[test_features['DayOfWeek'] == 5, 'weekday'] = 0\n test_features.loc[test_features['DayOfWeek'] == 6, 'weekday'] = 0\n test_features = test_features.drop(['Store'], axis=1)\n except KeyError:\n print(\"Column couldn't be found\")\n categorical = []\n numerical = []\n timestamp = []\n for col in train_features.columns:\n if train_features[col].dtype == object:\n categorical.append(col)\n elif train_features[col].dtype in ['int16', 'int32', 'int64',\n 'float16', 'float32', 'float64']:\n numerical.append(col)\n else:\n timestamp.append(col)\n my_cols = categorical + numerical + timestamp\n train_features = train_features[my_cols].copy()\n test_features = test_features[my_cols].copy()\n features = pd.concat([train_features, test_features])\n features.CompetitionOpenSinceMonth = (features.\n CompetitionOpenSinceMonth.astype('Int64'))\n features.CompetitionOpenSinceYear = (features.CompetitionOpenSinceYear.\n astype('Int64'))\n features.Promo2SinceWeek = features.Promo2SinceWeek.astype('Int64')\n features.Promo2SinceYear = features.Promo2SinceYear.astype('Int64')\n features['StateHoliday'].loc[features['StateHoliday'] == 0] = '0'\n for col in ['CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2SinceWeek', 'Promo2SinceYear']:\n features[col] = features[col].fillna(int(features[col].mean()))\n features.PromoInterval = features.PromoInterval.fillna(features.\n PromoInterval.mode()[0])\n features.Open = features.Open.fillna(features.Open.mode()[0])\n features = pd.get_dummies(features, 
columns=['StoreType', 'Assortment',\n 'PromoInterval', 'StateHoliday'])\n scaler = RobustScaler()\n c = ['DayOfWeek', 'Open', 'Promo', 'SchoolHoliday',\n 'CompetitionDistance', 'CompetitionOpenSinceMonth',\n 'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',\n 'Promo2SinceYear', 'WeekOfYear', 'Month', 'Year', 'Day',\n 'WeekOfYear', 'weekday']\n features[numerical] = scaler.fit_transform(features[numerical].values)\n return features\n\n\nfeatures = preprocess_data(merged_train, merged_test)\nfeatures = features.drop(['Date'], axis=1)\n\n\ndef reconstruct_sets(features):\n global x_train, x_val, y_train, y_val\n x_train = features.iloc[:len(train_features), :]\n x_test = features.iloc[len(train_features):, :]\n y_train = train_target\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,\n test_size=0.2, random_state=0)\n return x_train, x_val, y_train, y_val, x_test\n\n\nx_train, x_val, y_train, y_val, x_test = reconstruct_sets(features)\nclf = RandomForestRegressor(n_estimators=14)\nclf.fit(x_train, y_train)\ny_pred = clf.predict(x_val)\nprint('MSE =', mean_squared_error(y_val, y_pred))\nprint('Mean R2 score =', r2_score(y_val, y_pred))\nprint('MAE =', mean_absolute_error(y_val, y_pred))\n",
"step-5": "import numpy as np\nimport pandas as pd \nimport logging\nimport matplotlib.pyplot as plt\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler, RobustScaler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.linear_model import LinearRegression\nimport datetime\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.model_selection import KFold, cross_val_score, train_test_split\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n\n# ignore warnings\nimport warnings\nwarnings.filterwarnings(action=\"ignore\")\n\ndf_train = pd.read_csv('./Scripts/pages/train.csv')\ndf_store = pd.read_csv('./Scripts/pages/store.csv')\ndf_test = pd.read_csv('./Scripts/pages/test.csv')\n\nmerged_train = pd.merge(left = df_train, right = df_store, how = 'inner', left_on = 'Store', right_on = 'Store')\nmerged_test = pd.merge(left = df_test, right = df_store, how = 'inner', left_on = 'Store', right_on = 'Store')\n\n\ndef preprocess_data(train, test):\n \n # '''preprocessing'''\n global train_features, test_features, train_target, categorical, numerical\n\n # train and target features\n train_features = train.drop(['Sales', 'Customers'], axis = 1) #drop the target feature + customers (~ will not be used for prediction)\n train_target = train[['Sales']]\n test_features = test.drop(['Id'], axis = 1) #drop id, it's required only during submission\n\n #feature generation + transformations\n try:\n train_features['Date'] = pd.to_datetime(train_features.Date)\n train_features['Month'] = train_features.Date.dt.month.to_list()\n train_features['Year'] = train_features.Date.dt.year.to_list()\n train_features['Day'] = train_features.Date.dt.day.to_list()\n train_features['WeekOfYear'] = train_features.Date.dt.weekofyear.to_list()\n train_features['DayOfWeek'] = 
train_features.Date.dt.dayofweek.to_list()\n train_features['weekday'] = 1 # Initialize the column with default value of 1\n train_features.loc[train_features['DayOfWeek'] == 5, 'weekday'] = 0\n train_features.loc[train_features['DayOfWeek'] == 6, 'weekday'] = 0\n train_features = train_features.drop(['Store'], axis = 1)\n\n test_features['Date'] = pd.to_datetime(test_features.Date)\n test_features['Month'] = test_features.Date.dt.month.to_list()\n test_features['Year'] = test_features.Date.dt.year.to_list()\n test_features['Day'] = test_features.Date.dt.day.to_list()\n test_features['WeekOfYear'] = test_features.Date.dt.weekofyear.to_list()\n test_features['DayOfWeek'] = test_features.Date.dt.dayofweek.to_list()\n test_features['weekday'] = 1 # Initialize the column with default value of 1\n test_features.loc[test_features['DayOfWeek'] == 5, 'weekday'] = 0\n test_features.loc[test_features['DayOfWeek'] == 6, 'weekday'] = 0\n test_features = test_features.drop(['Store'], axis = 1)\n except KeyError:\n print(\"Column couldn't be found\")\n\n # numerical and categorical columns (train set)\n categorical = []\n numerical = []\n timestamp = []\n\n for col in train_features.columns:\n if train_features[col].dtype == object:\n categorical.append(col)\n elif train_features[col].dtype in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:\n numerical.append(col)\n else:\n timestamp.append(col)\n\n # Keep selected columns only\n my_cols = categorical + numerical + timestamp\n train_features = train_features[my_cols].copy()\n test_features = test_features[my_cols].copy()\n features = pd.concat([train_features, test_features]) #merge the features columns for uniform preprocessing\n\n # change dtypes for uniformity in preprocessing\n features.CompetitionOpenSinceMonth = features.CompetitionOpenSinceMonth.astype('Int64') \n features.CompetitionOpenSinceYear = features.CompetitionOpenSinceYear.astype('Int64')\n features.Promo2SinceWeek = 
features.Promo2SinceWeek.astype('Int64') \n features.Promo2SinceYear = features.Promo2SinceYear.astype('Int64')\n features[\"StateHoliday\"].loc[features[\"StateHoliday\"] == 0] = \"0\"\n\n\n\n # ''' actual preprocessing: the mighty pipeline '''\n # numeric\n for col in ['CompetitionDistance', 'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2SinceWeek', 'Promo2SinceYear']:\n features[col] = features[col].fillna((int(features[col].mean()))) \n features.PromoInterval = features.PromoInterval.fillna(features.PromoInterval.mode()[0])\n features.Open = features.Open.fillna(features.Open.mode()[0])\n features = pd.get_dummies(features, columns=['StoreType', 'Assortment', 'PromoInterval', 'StateHoliday'])\n \n scaler = RobustScaler()\n c = ['DayOfWeek', 'Open', 'Promo', 'SchoolHoliday', 'CompetitionDistance', 'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear',\n 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'WeekOfYear', 'Month', 'Year', 'Day', 'WeekOfYear', 'weekday']\n features[numerical] = scaler.fit_transform(features[numerical].values)\n \n\n return features\n\n\n\nfeatures = preprocess_data(merged_train, merged_test)\nfeatures = features.drop(['Date'], axis = 1)\n\n\n# reconstruct train and test sets\ndef reconstruct_sets(features):\n global x_train, x_val, y_train, y_val\n # global train_set\n # original train and test sets\n x_train = features.iloc[:len(train_features), :]\n x_test = features.iloc[len(train_features):, :]\n y_train = train_target\n # train_set = pd.concat([x_train, y_train], axis=1)\n\n # updated train and validation sets\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = .20, random_state = 0)\n\n\n return x_train, x_val, y_train, y_val, x_test\n\nx_train, x_val, y_train, y_val, x_test = reconstruct_sets(features)\n\nclf=RandomForestRegressor(n_estimators=14)\nclf.fit(x_train,y_train)\ny_pred = clf.predict(x_val)\nprint(\"MSE =\", mean_squared_error(y_val, y_pred))\nprint(\"Mean R2 score =\", 
r2_score(y_val, y_pred))\nprint(\"MAE =\", mean_absolute_error(y_val, y_pred))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
'''
filter_items = lambda a : a[0] == 'b'

fruits = ["apple", "banana", "pear", "orange"]
result = filter(filter_items, fruits)
print(list(result))
'''

'''
Given a list of integers, return the even integers in the list.

input = [11, 4, 5, 8, 9, 2, 12]
output = [4, 8, 2, 12]

input = [3, 5, 7]
output = []
'''


def even_integers(n):
    """Return True if *n* is even (predicate for filter())."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    return n % 2 == 0


# Renamed from `input`: shadowing the builtin would break any later
# call to input().
numbers = [11, 4, 5, 8, 9, 2, 12]
result = filter(even_integers, numbers)
print(list(result))

numbers = [3, 5, 7]
result = filter(even_integers, numbers)
print(list(result))
|
normal
|
{
"blob_id": "7d9032b2426dbf3c285b99efa78be38d8f76ec24",
"index": 1933,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(list(result))\n<mask token>\nprint(list(result))\n",
"step-3": "<mask token>\neven_integers = lambda a: a % 2 == 0\ninput = [11, 4, 5, 8, 9, 2, 12]\nresult = filter(even_integers, input)\nprint(list(result))\ninput = [3, 5, 7]\nresult = filter(even_integers, input)\nprint(list(result))\n",
"step-4": "'''\nfilter_items = lambda a : a[0] == 'b'\n\nfruits = [\"apple\", \"banana\", \"pear\", \"orange\"]\nresult = filter(filter_items, fruits)\nprint(list(result))\n'''\n\n'''\nGiven a list of integers, return the even integers in the list.\n\ninput = [11, 4, 5, 8, 9, 2, 12]\noutput = [4, 8, 2, 12]\n\ninput = [3, 5, 7]\noutput = []\n'''\n\n# even_integers = lambda a : a / 2 == 0\neven_integers = lambda a : a % 2 == 0\n\ninput = [11, 4, 5, 8, 9, 2, 12]\nresult = filter(even_integers, input)\nprint(list(result))\n\ninput = [3, 5, 7]\nresult = filter(even_integers, input)\nprint(list(result))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import discord
from app.vars.client import client
from app.helpers import delete, getUser, getGuild
@client.command()
async def inviteInfo(ctx, link):
    """Resolve a Discord invite link and post an embed with its details.

    Shows the target guild's id, name, description, creation date, member
    count and the invite URL, plus the inviter's id and tag when the
    invite exposes an inviter.
    """
    # Best effort: remove the invoking message (may lack permission).
    # Bug fix: bare `except:` also swallows SystemExit/KeyboardInterrupt;
    # narrowed to Exception here and below.
    try:
        await delete.byContext(ctx)
    except Exception:
        pass

    linkData = await client.fetch_invite(url=link)
    if (linkData.inviter):
        inviterData = await getUser.byID(linkData.inviter.id)
    # Prefer the fully-fetched guild; fall back to the partial guild
    # object embedded in the invite when the fetch fails.
    try:
        guildData = await getGuild.byID(linkData.guild.id)
    except Exception:
        guildData = linkData.guild

    embed = discord.Embed(title="Invite information", colour=discord.Color.purple())
    embed.set_thumbnail(url=guildData.icon_url)
    fields = [
        ("ID", f"```{guildData.id}```", True),
        ("Name:", f"```{guildData.name}```", True),  # bug fix: stray double colon in label
        ("Description", f"```{guildData.description}```", True),
        ("Created in:", f'```{guildData.created_at.strftime("%d/%m/%Y")}```', True),
        ("Member Count:", f"```{int(linkData.approximate_member_count)}```", True),
        ("Link", f"```{linkData.url}```", True),
        ("\u200b", "\u200b", True),  # zero-width spacer to balance the field grid
    ]
    for name, value, inline in fields:
        embed.add_field(name=name, value=value, inline=inline)

    if (linkData.inviter):
        embed.add_field(name="Inviter ID:", value=f"```{inviterData.id}```", inline=True)
        embed.add_field(name="Inviter:", value=f"```{inviterData.name + '#' + inviterData.discriminator}```", inline=True)

    embed.set_footer(text='Selfium (◔‿◔)')
    await ctx.send(embed=embed)
|
normal
|
{
"blob_id": "b8f9633ab3110d00b2f0b82c78ad047fca0d3eee",
"index": 6999,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n linkData = await client.fetch_invite(url=link)\n if linkData.inviter:\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n embed = discord.Embed(title='Invite information', colour=discord.Color.\n purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [('ID', f'```{guildData.id}```', True), ('Name::',\n f'```{guildData.name}```', True), ('Description',\n f'```{guildData.description}```', True), ('Created in:',\n f\"```{guildData.created_at.strftime('%d/%m/%Y')}```\", True), (\n 'Member Count:', f'```{int(linkData.approximate_member_count)}```',\n True), ('Link', f'```{linkData.url}```', True), ('\\u200b', '\\u200b',\n True)]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n if linkData.inviter:\n embed.add_field(name='Inviter ID:', value=f'```{inviterData.id}```',\n inline=True)\n embed.add_field(name='Inviter:', value=\n f\"```{inviterData.name + '#' + inviterData.discriminator}```\",\n inline=True)\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)\n",
"step-3": "import discord\nfrom app.vars.client import client\nfrom app.helpers import delete, getUser, getGuild\n\n\[email protected]()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n linkData = await client.fetch_invite(url=link)\n if linkData.inviter:\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n embed = discord.Embed(title='Invite information', colour=discord.Color.\n purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [('ID', f'```{guildData.id}```', True), ('Name::',\n f'```{guildData.name}```', True), ('Description',\n f'```{guildData.description}```', True), ('Created in:',\n f\"```{guildData.created_at.strftime('%d/%m/%Y')}```\", True), (\n 'Member Count:', f'```{int(linkData.approximate_member_count)}```',\n True), ('Link', f'```{linkData.url}```', True), ('\\u200b', '\\u200b',\n True)]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n if linkData.inviter:\n embed.add_field(name='Inviter ID:', value=f'```{inviterData.id}```',\n inline=True)\n embed.add_field(name='Inviter:', value=\n f\"```{inviterData.name + '#' + inviterData.discriminator}```\",\n inline=True)\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)\n",
"step-4": "import discord\nfrom app.vars.client import client\nfrom app.helpers import delete, getUser, getGuild\n\[email protected]()\nasync def inviteInfo(ctx, link):\n try:\n await delete.byContext(ctx)\n except:\n pass\n\n linkData = await client.fetch_invite(url=link)\n if (linkData.inviter):\n inviterData = await getUser.byID(linkData.inviter.id)\n try:\n guildData = await getGuild.byID(linkData.guild.id)\n except:\n guildData = linkData.guild\n\n embed = discord.Embed(title=\"Invite information\", colour=discord.Color.purple())\n embed.set_thumbnail(url=guildData.icon_url)\n fields = [\n (\"ID\", f\"```{guildData.id}```\", True),\n (\"Name::\", f\"```{guildData.name}```\", True),\n (\"Description\", f\"```{guildData.description}```\", True),\n (\"Created in:\", f'```{guildData.created_at.strftime(\"%d/%m/%Y\")}```', True),\n (\"Member Count:\", f\"```{int(linkData.approximate_member_count)}```\", True), \n (\"Link\", f\"```{linkData.url}```\", True),\n (\"\\u200b\", \"\\u200b\", True),\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n \n if (linkData.inviter):\n embed.add_field(name=\"Inviter ID:\", value=f\"```{inviterData.id}```\", inline=True)\n embed.add_field(name=\"Inviter:\", value=f\"```{inviterData.name + '#' + inviterData.discriminator}```\", inline=True)\n\n embed.set_footer(text='Selfium (◔‿◔)')\n await ctx.send(embed=embed)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import collections
s = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]
d = collections.defaultdict(list)
d2 = {'test':121}
for k, v in s:
d[k].append(v)
d['test'].append('value')
print list(d.items())
print d
print d['blue']
print type(d)
print type(d2)
|
normal
|
{
"blob_id": "15a894e6f94fc62b97d1614a4213f21331ef12a0",
"index": 7843,
"step-1": "import collections\ns = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]\n\nd = collections.defaultdict(list)\nd2 = {'test':121}\nfor k, v in s:\n d[k].append(v)\n\nd['test'].append('value')\n\nprint list(d.items())\nprint d\nprint d['blue']\nprint type(d)\nprint type(d2)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Ques1:
# To create a program that asks the user to enter their name and their age
# and prints out a message addressed to them that tells them the year that
# they will turn 100 years old. Additionally, the program asks the user for
# another number and prints out that many copies of the previous message on
# separate lines.
from datetime import datetime
name = input('Enter your name : ')
age = int(input('Enter your age : '))
year = int((100-age) + datetime.now().year)
copies = int(input('How many copies of the above message do you want? : '))
msg = name + " will turn 100 years old in " + str(year) + "\n"
print(msg * copies)
|
normal
|
{
"blob_id": "948b793359555f98872e0bdbf6db970ed1ff3b83",
"index": 7046,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(msg * copies)\n",
"step-3": "<mask token>\nname = input('Enter your name : ')\nage = int(input('Enter your age : '))\nyear = int(100 - age + datetime.now().year)\ncopies = int(input('How many copies of the above message do you want? : '))\nmsg = name + ' will turn 100 years old in ' + str(year) + '\\n'\nprint(msg * copies)\n",
"step-4": "from datetime import datetime\nname = input('Enter your name : ')\nage = int(input('Enter your age : '))\nyear = int(100 - age + datetime.now().year)\ncopies = int(input('How many copies of the above message do you want? : '))\nmsg = name + ' will turn 100 years old in ' + str(year) + '\\n'\nprint(msg * copies)\n",
"step-5": "# Ques1:\r\n# To create a program that asks the user to enter their name and their age \r\n# and prints out a message addressed to them that tells them the year that \r\n# they will turn 100 years old. Additionally, the program asks the user for \r\n# another number and prints out that many copies of the previous message on \r\n# separate lines.\r\n\r\nfrom datetime import datetime\r\nname = input('Enter your name : ')\r\nage = int(input('Enter your age : '))\r\nyear = int((100-age) + datetime.now().year)\r\ncopies = int(input('How many copies of the above message do you want? : '))\r\nmsg = name + \" will turn 100 years old in \" + str(year) + \"\\n\" \r\nprint(msg * copies)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from crawlr.models import Route, Category, UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128,
help_text = "Please enter the category name.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = Category
fields = ('name',)
class RouteForm(forms.ModelForm):
error_messages = {'duplicate_title':'Please enter a unique name for the crawl'}
title = forms.CharField(max_length=128,
help_text = "Please enter the name of the Crawl")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
#Hidden inputs for the variables retrieved from find directions page
start = forms.CharField(widget=forms.HiddenInput())
end = forms.CharField(widget=forms.HiddenInput())
waypts = forms.CharField(widget=forms.HiddenInput())
#Location choice, a drop down menu selection
category = forms.ModelChoiceField(queryset=Category.objects.all())
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget=forms.HiddenInput())
class Meta:
model = Route
fields = ('category', 'title', 'slug', 'start', 'end', 'waypts', 'created_by')
def clean_title(self):
title = self.cleaned_data["title"]
try:
Route.objects.get(title=title)
raise forms.ValidationError(
self.error_messages['duplicate_title'], # customized error message
code='duplicate_title',
)
except Route.DoesNotExist:
return title
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder' : 'Password'}), label='')
username = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Username'}), label='')
email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Email'}), label='', required=False)
class Meta:
model = User
fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('picture',)
|
normal
|
{
"blob_id": "abf25cf3d4435754b916fa06e5e887b1e3589a1c",
"index": 5073,
"step-1": "<mask token>\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-2": "<mask token>\n\n\nclass CategoryForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-3": "<mask token>\n\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128, help_text=\n 'Please enter the category name.')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-4": "from django import forms\nfrom crawlr.models import Route, Category, UserProfile\nfrom django.contrib.auth.models import User\n\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128, help_text=\n 'Please enter the category name.')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 
'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-5": "from django import forms\nfrom crawlr.models import Route, Category, UserProfile\nfrom django.contrib.auth.models import User\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128,\n help_text = \"Please enter the category name.\")\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n class Meta:\n model = Category\n fields = ('name',)\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128,\n help_text = \"Please enter the name of the Crawl\")\n\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n #Hidden inputs for the variables retrieved from find directions page\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n #Location choice, a drop down menu selection\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget=forms.HiddenInput())\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts', 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data[\"title\"]\n try:\n Route.objects.get(title=title)\n\n raise forms.ValidationError(\n self.error_messages['duplicate_title'], # customized error message\n code='duplicate_title',\n )\n except Route.DoesNotExist:\n return title\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder' : 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Username'}), label='')\n email = 
forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Email'}), label='', required=False)\n\n class Meta:\n model = User\n fields = ('username', 'email', 'password')\n\nclass UserProfileForm(forms.ModelForm):\n class Meta:\n model = UserProfile\n fields = ('picture',)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
from django.urls import path
from django.contrib.auth import views as auth_views
from . views import register, channel
urlpatterns = [
path('register/', register, name="register"),
path('channel/', channel, name="channel"),
path('login/', auth_views.LoginView.as_view(template_name='user/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='user/logout.html'), name='logout'),
path('password_reset/',auth_views.PasswordResetView.as_view(template_name="user/password_reset.html"), name="password_reset"),
path('password_reset/done/',auth_views.PasswordResetDoneView.as_view(template_name="user/password_reset_done.html"), name="password_reset_done"),
path('password_reset_confirm/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name="user/password_reset_confirm.html"),name="password_reset_confirm"),
path('password_reset_complete/',auth_views.PasswordResetCompleteView.as_view(template_name="user/password_reset_complete.html"),name="password_reset_complete"),
]
|
normal
|
{
"blob_id": "d76c1507594bb0c1ed7a83e6c5961097c7fbf54a",
"index": 9859,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('register/', register, name='register'), path(\n 'channel/', channel, name='channel'), path('login/', auth_views.\n LoginView.as_view(template_name='user/login.html'), name='login'), path\n ('logout/', auth_views.LogoutView.as_view(template_name=\n 'user/logout.html'), name='logout'), path('password_reset/', auth_views\n .PasswordResetView.as_view(template_name='user/password_reset.html'),\n name='password_reset'), path('password_reset/done/', auth_views.\n PasswordResetDoneView.as_view(template_name=\n 'user/password_reset_done.html'), name='password_reset_done'), path(\n 'password_reset_confirm/<uidb64>/<token>/', auth_views.\n PasswordResetConfirmView.as_view(template_name=\n 'user/password_reset_confirm.html'), name='password_reset_confirm'),\n path('password_reset_complete/', auth_views.PasswordResetCompleteView.\n as_view(template_name='user/password_reset_complete.html'), name=\n 'password_reset_complete')]\n",
"step-3": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom .views import register, channel\nurlpatterns = [path('register/', register, name='register'), path(\n 'channel/', channel, name='channel'), path('login/', auth_views.\n LoginView.as_view(template_name='user/login.html'), name='login'), path\n ('logout/', auth_views.LogoutView.as_view(template_name=\n 'user/logout.html'), name='logout'), path('password_reset/', auth_views\n .PasswordResetView.as_view(template_name='user/password_reset.html'),\n name='password_reset'), path('password_reset/done/', auth_views.\n PasswordResetDoneView.as_view(template_name=\n 'user/password_reset_done.html'), name='password_reset_done'), path(\n 'password_reset_confirm/<uidb64>/<token>/', auth_views.\n PasswordResetConfirmView.as_view(template_name=\n 'user/password_reset_confirm.html'), name='password_reset_confirm'),\n path('password_reset_complete/', auth_views.PasswordResetCompleteView.\n as_view(template_name='user/password_reset_complete.html'), name=\n 'password_reset_complete')]\n",
"step-4": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom . views import register, channel\n\n\nurlpatterns = [\n path('register/', register, name=\"register\"),\n path('channel/', channel, name=\"channel\"),\n path('login/', auth_views.LoginView.as_view(template_name='user/login.html'), name='login'),\n path('logout/', auth_views.LogoutView.as_view(template_name='user/logout.html'), name='logout'),\n path('password_reset/',auth_views.PasswordResetView.as_view(template_name=\"user/password_reset.html\"), name=\"password_reset\"),\n path('password_reset/done/',auth_views.PasswordResetDoneView.as_view(template_name=\"user/password_reset_done.html\"), name=\"password_reset_done\"),\n path('password_reset_confirm/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name=\"user/password_reset_confirm.html\"),name=\"password_reset_confirm\"),\n path('password_reset_complete/',auth_views.PasswordResetCompleteView.as_view(template_name=\"user/password_reset_complete.html\"),name=\"password_reset_complete\"),\n]\n\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from socketserver import StreamRequestHandler, TCPServer
from functools import partial
class EchoHandler(StreamRequestHandler):
def __init__(self, *args, ack, **kwargs):
self.ack = ack
super.__init__(*args, **kwargs)
def handle(self):
for line in self.rfile:
self.wfile.write(self.ack +line)
# serv = TCPServer(('', 15000), EchoHandler)
serv = TCPServer(('', 15000), partial(EchoHandler, ack=b'RECEIVE:'))
# serv = TCPServer(('', 15000), lambda *args, **kwargs: EchoHandler(*args, ack=b'RECEIVED:', **kwargs)) #等价
serv.serve_forever()
|
normal
|
{
"blob_id": "7819e41d567daabe64bd6eba62461d9e553566b3",
"index": 5393,
"step-1": "<mask token>\n\n\nclass EchoHandler(StreamRequestHandler):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EchoHandler(StreamRequestHandler):\n\n def __init__(self, *args, ack, **kwargs):\n self.ack = ack\n super.__init__(*args, **kwargs)\n\n def handle(self):\n for line in self.rfile:\n self.wfile.write(self.ack + line)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass EchoHandler(StreamRequestHandler):\n\n def __init__(self, *args, ack, **kwargs):\n self.ack = ack\n super.__init__(*args, **kwargs)\n\n def handle(self):\n for line in self.rfile:\n self.wfile.write(self.ack + line)\n\n\nserv = TCPServer(('', 15000), partial(EchoHandler, ack=b'RECEIVE:'))\nserv.serve_forever()\n",
"step-4": "from socketserver import StreamRequestHandler, TCPServer\nfrom functools import partial\n\n\nclass EchoHandler(StreamRequestHandler):\n\n def __init__(self, *args, ack, **kwargs):\n self.ack = ack\n super.__init__(*args, **kwargs)\n\n def handle(self):\n for line in self.rfile:\n self.wfile.write(self.ack + line)\n\n\nserv = TCPServer(('', 15000), partial(EchoHandler, ack=b'RECEIVE:'))\nserv.serve_forever()\n",
"step-5": "from socketserver import StreamRequestHandler, TCPServer\nfrom functools import partial\n\n\nclass EchoHandler(StreamRequestHandler):\n def __init__(self, *args, ack, **kwargs):\n self.ack = ack\n super.__init__(*args, **kwargs)\n\n def handle(self):\n for line in self.rfile:\n self.wfile.write(self.ack +line)\n\n# serv = TCPServer(('', 15000), EchoHandler)\nserv = TCPServer(('', 15000), partial(EchoHandler, ack=b'RECEIVE:'))\n# serv = TCPServer(('', 15000), lambda *args, **kwargs: EchoHandler(*args, ack=b'RECEIVED:', **kwargs)) #等价\nserv.serve_forever()\n",
"step-ids": [
1,
3,
5,
6,
7
]
}
|
[
1,
3,
5,
6,
7
] |
from layout import UIDump
import Tkinter
from Tkinter import *
from ScriptGenerator import ScriptGen
class Divide_and_Conquer():
def __init__(self, XY):
self.XY = XY
self.user_val = 'None'
self.flag = 'green'
print self.XY
def bounds_Compare(self, bounds, filename):
""" Compares the bounds with Master XY and generates the Script fro given Element. """
# removed "android.widget.Spinner", "android.widget.ExpandableListView" from reqlist, it's interfering with the view.
reqlist = ["android.widget.EditText",
"android.widget.Button", "android.widget.CheckBox", "android.widget.RadioButton", "android.widget.TextView", "android.widget.RelativeLayout",
"android.widget.ImageView", "android.app.Dialogue", "android.view.View"]
ignore_list = [None,'','None']
collection = []
logs = []
count = 0
len_bounds = len(bounds)
for i in bounds:
print '\n ---------------------------------------------- \n'
# print "for every bound block" ----> DEBUG < -----
if int(bounds[count][2]) <= self.XY[1] <= int(bounds[count][3]):
if int(bounds[count][0]) <= self.XY[0] <= int(bounds[count][1]):
# print "current X_Y : ", str(self.XY)
# print "current bounds : ", str(UIDump.bounds[count])
# print "unique id : ", str(UIDump.check_unique_id[count])
# print "resource id : ", str(UIDump.check_resource_id[count])
# print "current text : ", str(UIDump.check_text[count])
# print "in range block" ----> DEBUG < -----
if UIDump.elements[count] in reqlist:
# print "in reqlist block" ----> DEBUG < -----
if UIDump.elements[count] == reqlist[0]:
# print "EditText block" ----> DEBUG < -----
window = Tkinter.Tk()
window.resizable(width=False,height=False);
window.geometry("200x80")
l1=Label(window,width=30,text="Enter Text to Type: ")
l1.pack()
self.entry_id = StringVar()
e1 = Entry(window, width=30,textvariable=self.entry_id)
e1.pack()
def input(args= None):
self.user_val = e1.get()
window.destroy()
if self.resource_id not in ignore_list:
ScriptGen(filename).script("vc.findViewByIdOrRaise('{id}').setText('{text}')\n".format(id=self.resource_id, text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}' on id : '{id}'\n".format(classname =self.classname,text=self.user_val, id=self.resource_id))
elif self.unique_id not in ignore_list:
ScriptGen(filename).script("vc.findViewByIdOrRaise('{id}').setText('{text}')\n".format(id=self.unique_id, text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}'\n".format(classname =self.classname,text=self.user_val))
elif UIDump.check_text[count] not in ignore_list:
ScriptGen(filename).script("vc.findViewWithTextOrRaise('{id_text}').setText('{text}')\n".format(id_text=UIDump.check_text[count], text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}' on Element with text : '{id_text}'\n".format(classname =self.classname,id_text=UIDump.check_text[count], text=self.user_val))
else :
ScriptGen(filename).script("device.touchDip({X},{Y},0)\n".format(X=int(self.XY[0]), Y=int(self.XY[1])))
ScriptGen(filename).log("#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ="Vulnerable",X=int(self.XY[0]), Y=int(self.XY[1])))
def framedestroy():
window.destroy()
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
b1=Button(window,text="Ok",width=10, command = input)
b1.pack(side=LEFT)
b1.place(x=10,y=50)
b2=Button(window, text = "Cancel", width=10, command = framedestroy)
b2.pack(side=RIGHT)
b2.place(x=110,y=50)
window.bind('<Return>', input)
window.mainloop()
self.flag = 'red'
break
elif UIDump.elements[count] in reqlist[1:4]:
# print "Button block" ----> DEBUG < -----
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
if UIDump.check_text[count] not in ignore_list:
log_ = "#LOG({classname}): Clicked on element with text : '{id}'\n".format(classname =self.classname,id=UIDump.check_text[count])
line = "vc.findViewWithTextOrRaise('{id}').touch()\n\tvc.sleep(3)\n".format(id=UIDump.check_text[count])
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif self.resource_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.resource_id)
line = "vc.findViewByIdOrRaise('{id}').touch()\n\tvc.sleep(3)\n".format(id=self.resource_id)
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif self.unique_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.unique_id)
line = "vc.findViewByIdOrRaise('{id_text}').touch()\n\tvc.sleep(3)\n".format(id_text=self.unique_id)
if line not in collection:
collection.append(line)
logs.append(log_)
break
else :
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname =self.classname,X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif UIDump.elements[count] in reqlist[4:]:
# print "remaining views block" ----> DEBUG < -----
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
if UIDump.check_text[count] not in ignore_list:
log_ = "#LOG({classname}): Clicked on element with Text : '{id}'\n".format(classname =self.classname,id=UIDump.check_text[count])
line = "vc.findViewWithTextOrRaise('{id}').touch()\n".format(id=UIDump.check_text[count])
if line not in collection:
collection.append(line)
logs.append(log_)
elif self.resource_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.resource_id)
line = "vc.findViewByIdOrRaise('{id}').touch()\n".format(id=self.resource_id)
if line not in collection:
collection.append(line)
logs.append(log_)
elif self.unique_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.unique_id)
line = "vc.findViewByIdOrRaise('{id_text}').touch()\n".format(id_text=self.unique_id)
if line not in collection:
collection.append(line)
logs.append(log_)
else :
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
else:
# print "not in imp view block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif UIDump.elements[count] in ["android.widget.FrameLayout"]:
# print "FrameLayout block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
count += 1
else :
# print "nothing matches block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
print collection
print logs
# ----> DEBUG < -----
if self.flag == 'green':
ScriptGen(filename).script(collection[-1])
ScriptGen(filename).log(logs[-1])
else:
pass
def main():
	# Entry point for running the bounds comparison as a script.
	# NOTE(review): Divide_and_Conquer.__init__ takes an XY argument and
	# bounds_Compare takes (bounds, filename); this call supplies neither
	# XY nor filename, and `bounds` is not defined at module level in the
	# visible code -- confirm the intended invocation with the caller.
	Divide_and_Conquer().bounds_Compare(bounds)
if __name__ == '__main__':
	main()
|
normal
|
{
"blob_id": "7a65a5522db97a7a113a412883b640feede5bcee",
"index": 909,
"step-1": "from layout import UIDump\nimport Tkinter \nfrom Tkinter import *\nfrom ScriptGenerator import ScriptGen\n\nclass Divide_and_Conquer():\n\n\tdef __init__(self, XY):\n\t\tself.XY = XY\n\t\tself.user_val = 'None'\n\t\tself.flag = 'green'\n\n\t\tprint self.XY\n\t\n\tdef bounds_Compare(self, bounds, filename):\n\t\t\"\"\" Compares the bounds with Master XY and generates the Script fro given Element. \"\"\"\n\n\t\t# removed \"android.widget.Spinner\", \"android.widget.ExpandableListView\" from reqlist, it's interfering with the view.\n \t\t\n \t\treqlist = [\"android.widget.EditText\",\n \t\t\"android.widget.Button\", \"android.widget.CheckBox\", \"android.widget.RadioButton\", \"android.widget.TextView\", \"android.widget.RelativeLayout\",\n \t\t\"android.widget.ImageView\", \"android.app.Dialogue\", \"android.view.View\"]\n\n \t\tignore_list = [None,'','None']\n\t\t\n\t\tcollection = []\n\t\tlogs = []\n\n\t\tcount = 0\n\t\tlen_bounds = len(bounds)\n\t\t\n\t\tfor i in bounds:\n\t\t\tprint '\\n ---------------------------------------------- \\n'\n\t\t\t# print \"for every bound block\" ----> DEBUG < -----\n\t\t\tif int(bounds[count][2]) <= self.XY[1] <= int(bounds[count][3]):\n\t\t\t\tif int(bounds[count][0]) <= self.XY[0] <= int(bounds[count][1]):\n\t\t\t\t\t\n\t\t\t\t\t# print \"current X_Y : \", str(self.XY)\n\t\t\t\t\t# print \"current bounds : \", str(UIDump.bounds[count])\n\t\t\t\t\t# print \"unique id : \", str(UIDump.check_unique_id[count])\n\t\t\t\t\t# print \"resource id : \", str(UIDump.check_resource_id[count])\n\t\t\t\t\t# print \"current text : \", str(UIDump.check_text[count])\n\n\t\t\t\t\t# print \"in range block\" ----> DEBUG < -----\n\t\t\t\t\t\t\n\t\t\t\t\tif UIDump.elements[count] in reqlist:\n\t\t\t\t\t\t# print \"in reqlist block\" ----> DEBUG < -----\n\t\t\t\t\t\t\n\t\t\t\t\t\tif UIDump.elements[count] == reqlist[0]:\n\t\t\t\t\t\t\t# print \"EditText block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\twindow = 
Tkinter.Tk()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\twindow.resizable(width=False,height=False);\n\t\t\t\t\t\t\twindow.geometry(\"200x80\")\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tl1=Label(window,width=30,text=\"Enter Text to Type: \")\n\t\t\t\t\t\t\tl1.pack()\n\n\t\t\t\t\t\t\tself.entry_id = StringVar() \n\t\t\t\t\t\t\te1 = Entry(window, width=30,textvariable=self.entry_id)\n\t\t\t\t\t\t\te1.pack()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tdef input(args= None):\n\t\t\t\t\t\t\t\tself.user_val = e1.get()\n\t\t\t\t\t\t\t\twindow.destroy()\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif self.resource_id not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewByIdOrRaise('{id}').setText('{text}')\\n\".format(id=self.resource_id, text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}' on id : '{id}'\\n\".format(classname =self.classname,text=self.user_val, id=self.resource_id))\n\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewByIdOrRaise('{id}').setText('{text}')\\n\".format(id=self.unique_id, text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}'\\n\".format(classname =self.classname,text=self.user_val))\n\t\t\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewWithTextOrRaise('{id_text}').setText('{text}')\\n\".format(id_text=UIDump.check_text[count], text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}' on Element with text : '{id_text}'\\n\".format(classname =self.classname,id_text=UIDump.check_text[count], text=self.user_val))\n\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"device.touchDip({X},{Y},0)\\n\".format(X=int(self.XY[0]), 
Y=int(self.XY[1])))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname =\"Vulnerable\",X=int(self.XY[0]), Y=int(self.XY[1])))\n\n\t\t\t\t\t\t\tdef framedestroy():\n\t\t\t\t\t\t\t\twindow.destroy()\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tb1=Button(window,text=\"Ok\",width=10, command = input)\n\t\t\t\t\t\t\tb1.pack(side=LEFT)\n\t\t\t\t\t\t\tb1.place(x=10,y=50)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tb2=Button(window, text = \"Cancel\", width=10, command = framedestroy)\n\t\t\t\t\t\t\tb2.pack(side=RIGHT)\n\t\t\t\t\t\t\tb2.place(x=110,y=50)\n\n\t\t\t\t\t\t\twindow.bind('<Return>', input)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\twindow.mainloop()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tself.flag = 'red'\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\telif UIDump.elements[count] in reqlist[1:4]:\n\t\t\t\t\t\t\t# print \"Button block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on element with text : '{id}'\\n\".format(classname =self.classname,id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tline = \"vc.findViewWithTextOrRaise('{id}').touch()\\n\\tvc.sleep(3)\\n\".format(id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\telif self.resource_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.resource_id)\n\t\t\t\t\t\t\t\tline = 
\"vc.findViewByIdOrRaise('{id}').touch()\\n\\tvc.sleep(3)\\n\".format(id=self.resource_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.unique_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id_text}').touch()\\n\\tvc.sleep(3)\\n\".format(id_text=self.unique_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname =self.classname,X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\telif UIDump.elements[count] in reqlist[4:]:\n\t\t\t\t\t\t\t# print \"remaining views block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on element with Text : '{id}'\\n\".format(classname =self.classname,id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tline = \"vc.findViewWithTextOrRaise('{id}').touch()\\n\".format(id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\t\telif self.resource_id not in 
ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.resource_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id}').touch()\\n\".format(id=self.resource_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.unique_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id_text}').touch()\\n\".format(id_text=self.unique_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# print \"not in imp view block\" ----> DEBUG < -----\n\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\telif UIDump.elements[count] in [\"android.widget.FrameLayout\"]:\n\t\t\t\t\t\t# print \"FrameLayout block\" ----> DEBUG < -----\n\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates 
({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\n\t\t\tcount += 1\n\n\t\telse :\n\t\t\t# print \"nothing matches block\" ----> DEBUG < -----\n\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\tif line not in collection: \n\t\t\t\tcollection.append(line)\n\t\t\t\tlogs.append(log_)\n\n\t\tprint collection\n\t\tprint logs\n\t\t# ----> DEBUG < -----\n\t\t\n\t\tif self.flag == 'green':\n\t\t\tScriptGen(filename).script(collection[-1])\n\t\t\tScriptGen(filename).log(logs[-1])\n\t\telse:\n\t\t\tpass\n\ndef main():\n\tDivide_and_Conquer().bounds_Compare(bounds)\n\nif __name__ == '__main__':\n\tmain()\t",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.shortcuts import render
from django.shortcuts import redirect
# Create your views here.
from .forms import AddBookForm ,UpdateBookForm,BookCreateModelForm,SearchForm,RegistrationForm,SignInForm
from book.models import Books
from django.contrib.auth import authenticate,login,logout
def book_add(request):
    """Create a new Books row from BookCreateModelForm.

    GET renders an empty form; POST validates and saves, then redirects
    to 'index'.  Unauthenticated users are redirected to the sign-in
    page ('singn' is the project's established, if misspelled, URL name).
    """
    if request.user.is_authenticated:
        context = {}
        if request.method == "GET":
            form = BookCreateModelForm()
            context["form"] = form
            return render(request, "addbook.html", context)
        elif request.method == "POST":
            context = {}
            form = BookCreateModelForm(request.POST)
            if form.is_valid():
                form.save()
                return redirect("index")
            else:
                # Bug fix: put the bound form (carrying validation errors)
                # back into the context; previously an empty context was
                # rendered, so the template could never show the errors.
                context["form"] = form
                return render(request, "addbook.html", context)
    else:
        return redirect('singn')
def get_books(request):
    """List all books; on POST, filter the list by a book-name substring."""
    if not request.user.is_authenticated:
        return redirect('singn')
    search_form = SearchForm()
    context = {"books": Books.objects.all(), "form": search_form}
    if request.method == "POST":
        search_form = SearchForm(request.POST)
        if search_form.is_valid():
            wanted = search_form.cleaned_data["book_name"]
            context["books"] = Books.objects.filter(book_name__contains=wanted)
            return render(request, "book_list.html", context)
        # Invalid search: re-render with the bound form so errors display.
        context["form"] = search_form
        return render(request, "book_list.html", context)
    return render(request, "book_list.html", context)
def book_details(request, id):
    """Render the detail page for the book with the given primary key."""
    if not request.user.is_authenticated:
        return redirect('singn')
    selected = Books.objects.get(id=id)
    return render(request, "book_details.html", {"book": selected})
def remove_book(request, id):
    """Delete the book with the given primary key and return to the list.

    NOTE(review): deletion happens on a plain GET request; consider
    restricting this to POST to avoid accidental deletes via crawlers.
    """
    if not request.user.is_authenticated:
        return redirect('singn')
    Books.objects.get(id=id).delete()
    return redirect("books")
def update_book(request, id):
    """Edit an existing book through a model form bound to its instance.

    GET shows the form pre-filled from the current Books row; POST
    validates and saves, redirecting to the book list on success.
    Unauthenticated users are sent to the sign-in page.
    """
    if request.user.is_authenticated:
        book = Books.objects.get(id=id)
        form = BookCreateModelForm(instance=book)
        context = {}
        context['form'] = form
        if request.method == "POST":
            form = BookCreateModelForm(instance=book, data=request.POST)
            if form.is_valid():
                form.save()
                return redirect("books")
            else:
                # Bug fix: reuse the instance-bound form (which carries the
                # validation errors) instead of rebuilding one without the
                # instance; also drop the leftover debug print(form).
                context["form"] = form
                return render(request, "edit.html", context)
        return render(request, "edit.html", context)
    else:
        return redirect('singn')
def create_account(request):
    """Register a new user account, then redirect to the sign-in page."""
    reg_form = RegistrationForm()
    context = {'form': reg_form}
    if request.method == "POST":
        reg_form = RegistrationForm(request.POST)
        if reg_form.is_valid():
            reg_form.save()
            print("account created")
            return redirect("singn")
        # Re-render with the bound form so validation errors are shown.
        context["form"] = reg_form
        return render(request, "createaccount.html", context)
    return render(request, "createaccount.html", context)
def singn_in(request):
    """Authenticate a user; on success log them in and go to the index.

    A valid form with bad credentials falls through to re-render the
    sign-in page (no explicit error message is added).
    """
    login_form = SignInForm()
    context = {'form': login_form}
    if request.method == "POST":
        login_form = SignInForm(request.POST)
        if login_form.is_valid():
            creds = login_form.cleaned_data
            user = authenticate(request,
                                username=creds["username"],
                                password=creds["password"])
            if user:
                login(request, user)
                return redirect("index")
        else:
            context['form'] = login_form
            return render(request, "signin.html", context)
    return render(request, "signin.html", context)
def signout(request):
    """Log the current user out (if signed in) and go to the sign-in page."""
    if request.user.is_authenticated:
        logout(request)
        return redirect("singn")
    return redirect('singn')
|
normal
|
{
"blob_id": "aba2a0a262c14f286c278f21ba42871410c174f0",
"index": 953,
"step-1": "<mask token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\ndef get_books(request):\n if request.user.is_authenticated:\n form = SearchForm()\n context = {}\n books = Books.objects.all()\n context['books'] = books\n context['form'] = form\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n book_name = form.cleaned_data['book_name']\n books = Books.objects.filter(book_name__contains=book_name)\n context['books'] = books\n return render(request, 'book_list.html', context)\n else:\n context['form'] = form\n return render(request, 'book_list.html', context)\n return render(request, 'book_list.html', context)\n else:\n return redirect('singn')\n\n\n<mask token>\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\n<mask token>\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\ndef get_books(request):\n if request.user.is_authenticated:\n form = SearchForm()\n context = {}\n books = Books.objects.all()\n context['books'] = books\n context['form'] = form\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n book_name = form.cleaned_data['book_name']\n books = Books.objects.filter(book_name__contains=book_name)\n context['books'] = books\n return render(request, 'book_list.html', context)\n else:\n context['form'] = form\n return render(request, 'book_list.html', context)\n return render(request, 'book_list.html', context)\n else:\n return redirect('singn')\n\n\ndef book_details(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n context = {}\n context['book'] = book\n return render(request, 'book_details.html', context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\n<mask token>\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\n<mask token>\n\n\ndef signout(request):\n if 
request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"step-3": "<mask token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\ndef get_books(request):\n if request.user.is_authenticated:\n form = SearchForm()\n context = {}\n books = Books.objects.all()\n context['books'] = books\n context['form'] = form\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n book_name = form.cleaned_data['book_name']\n books = Books.objects.filter(book_name__contains=book_name)\n context['books'] = books\n return render(request, 'book_list.html', context)\n else:\n context['form'] = form\n return render(request, 'book_list.html', context)\n return render(request, 'book_list.html', context)\n else:\n return redirect('singn')\n\n\ndef book_details(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n context = {}\n context['book'] = book\n return render(request, 'book_details.html', context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\ndef update_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book)\n context = {}\n context['form'] = form\n if request.method == 'POST':\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('books')\n else:\n form = BookCreateModelForm(request.POST)\n context['form'] = form\n 
print(form)\n return render(request, 'edit.html', context)\n return render(request, 'edit.html', context)\n else:\n return redirect('singn')\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\n<mask token>\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"step-4": "<mask token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\ndef get_books(request):\n if request.user.is_authenticated:\n form = SearchForm()\n context = {}\n books = Books.objects.all()\n context['books'] = books\n context['form'] = form\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n book_name = form.cleaned_data['book_name']\n books = Books.objects.filter(book_name__contains=book_name)\n context['books'] = books\n return render(request, 'book_list.html', context)\n else:\n context['form'] = form\n return render(request, 'book_list.html', context)\n return render(request, 'book_list.html', context)\n else:\n return redirect('singn')\n\n\ndef book_details(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n context = {}\n context['book'] = book\n return render(request, 'book_details.html', context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\ndef update_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book)\n context = {}\n context['form'] = form\n if request.method == 'POST':\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('books')\n else:\n form = BookCreateModelForm(request.POST)\n context['form'] = form\n 
print(form)\n return render(request, 'edit.html', context)\n return render(request, 'edit.html', context)\n else:\n return redirect('singn')\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\ndef singn_in(request):\n form = SignInForm()\n context = {'form': form}\n if request.method == 'POST':\n form = SignInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('index')\n else:\n context['form'] = form\n return render(request, 'signin.html', context)\n return render(request, 'signin.html', context)\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"step-5": "from django.shortcuts import render\nfrom django.shortcuts import redirect\n\n\n\n# Create your views here.\nfrom .forms import AddBookForm ,UpdateBookForm,BookCreateModelForm,SearchForm,RegistrationForm,SignInForm\nfrom book.models import Books\nfrom django.contrib.auth import authenticate,login,logout\n\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == \"GET\":\n form = BookCreateModelForm()\n context[\"form\"] = form\n return render(request, \"addbook.html\", context)\n elif request.method == \"POST\":\n context = {}\n form = BookCreateModelForm(request.POST)\n \n if form.is_valid():\n form.save()\n # context[\"form\"] = form\n # book_name = form.cleaned_data[\"book_name\"]\n # author= form.cleaned_data[\"author\"]\n # category=form.cleaned_data[\"category\"]\n # prices=form.cleaned_data[\"price\"]\n # copies=form.cleaned_data[\"number_copies\"]\n # print(book_name,author,category,prices,copies)\n # book=Books(book_name=book_name,author=author,category=category,price=prices,copies=copies)\n # book.save()\n return redirect(\"index\")\n else:\n return render(request, \"addbook.html\",context)\n else:\n return redirect('singn')\n\ndef get_books(request):\n if request.user.is_authenticated:\n form=SearchForm()\n context = {}\n books=Books.objects.all()\n context[\"books\"]=books\n context['form']=form\n if request.method==\"POST\":\n form=SearchForm(request.POST)\n if form.is_valid():\n book_name=form.cleaned_data[\"book_name\"]\n books=Books.objects.filter(book_name__contains=book_name)\n context['books']=books\n return render(request,\"book_list.html\",context)\n else:\n context['form']=form\n return render(request, \"book_list.html\", context)\n return render(request, \"book_list.html\", context)\n else:\n return redirect('singn')\n\ndef book_details(request,id):\n if request.user.is_authenticated:\n book=Books.objects.get(id=id)\n context = {}\n context[\"book\"]=book\n return 
render(request,\"book_details.html\",context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request,id):\n if request.user.is_authenticated:\n book=Books.objects.get(id=id)\n book.delete()\n return redirect(\"books\")\n else:\n return redirect('singn')\ndef update_book(request,id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n form=BookCreateModelForm(instance=book)\n\n # form=BookCreateModelForm(initial={\n # \"book_name\":book.book_name,\n # \"author\":book.author,\n # \"category\":book.category,\n # \"price\":book.price,\n # \"number_copies\":book.copies})\n context = {}\n context['form']=form\n if request.method==\"POST\":\n book = Books.objects.get(id=id)\n form=BookCreateModelForm(instance=book,data=request.POST)\n if form.is_valid():\n form.save()\n # form=BookCreateModelForm(request.POST)\n #\n # if form.is_valid():\n # book.book_name=form.cleaned_data[\"book_name\"]\n # book.author=form.cleaned_data[\"author\"]\n # book.category=form.cleaned_data[\"category\"]\n # book.price=form.cleaned_data[\"price\"]\n # book.copies=form.cleaned_data[\"number_copies\"]\n # book.save()\n return redirect(\"books\")\n else:\n form=BookCreateModelForm(request.POST)\n context[\"form\"]=form\n print(form)\n return render(request, \"edit.html\", context)\n return render(request,\"edit.html\",context)\n else:\n return redirect('singn')\n\n\ndef create_account(request):\n form=RegistrationForm()\n context={'form':form}\n if request.method==\"POST\":\n form=RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print(\"account created\")\n return redirect(\"singn\")\n else:\n context[\"form\"]=form\n return render(request, \"createaccount.html\", context)\n\n return render(request,\"createaccount.html\",context)\n\n\ndef singn_in(request):\n form=SignInForm()\n context={'form':form}\n if request.method==\"POST\":\n form=SignInForm(request.POST)\n if form.is_valid():\n username=form.cleaned_data[\"username\"]\n 
password=form.cleaned_data[\"password\"]\n user=authenticate(request,username=username,password=password)\n if user:\n login(request,user)\n return redirect(\"index\")\n else:\n context['form']=form\n return render(request, \"signin.html\", context)\n\n\n \n return render(request,\"signin.html\",context)\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect(\"singn\")\n else:\n return redirect('singn')\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
from discord.ext import commands, tasks
from discord.utils import get
import discord
import re
import json
import time
import random
import asyncio
import os
import datetime
from live_ticker_scrape import wrangle_data
from tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver , link
# One discord.Client per tracked instrument; each bot's nickname and
# presence are updated with that instrument's live quote by the
# called_second task below.
es_bot = discord.Client()
nas_bot = discord.Client()
dow_bot = discord.Client()
us10y_bot = discord.Client()
vix_bot = discord.Client()
# NOTE(review): `ticker_vix` is rebound to quote data inside called_second,
# so this extra Client instance appears unused -- confirm it can be removed.
ticker_vix = discord.Client()
dollar_bot = discord.Client()
silver_bot = discord.Client()
btc_bot = discord.Client()
eth_bot= discord.Client()
link_bot = discord.Client()
# NOTE(review): `loop` is not used in the visible code -- presumably the
# clients are started on it elsewhere in the file; confirm.
loop = asyncio.get_event_loop()
@es_bot.event
async def on_ready():
    """Announce that the ES ticker client has connected to Discord."""
    print("es started")
@nas_bot.event
async def on_ready():
    """Announce that the NASDAQ ticker client has connected to Discord."""
    print("nas started")
@dow_bot.event
async def on_ready():
    """Announce that the Dow ticker client has connected to Discord."""
    print("dow started")
@silver_bot.event
async def on_ready():
    """Announce that the silver ticker client has connected to Discord."""
    print("silver started")
@us10y_bot.event
async def on_ready():
    """Announce that the US 10-year yield client has connected to Discord."""
    print("us10y started")
@dollar_bot.event
async def on_ready():
    """Announce that the dollar-index client has connected to Discord.

    Bug fix: the handler was named ``on_Ready`` (capital R); discord.py
    dispatches events by exact name, so the misspelled handler was never
    called and the dollar bot's ready message never printed.
    """
    print('dollar started')
@vix_bot.event
async def on_ready():
    """Announce that the VIX ticker client has connected to Discord."""
    print("vix started")
@btc_bot.event
async def on_ready():
    """Announce that the Bitcoin ticker client has connected to Discord."""
    print("btc started")
@eth_bot.event
async def on_ready():
    """Announce that the Ethereum ticker client has connected to Discord."""
    print("eth started")
@link_bot.event
async def on_ready():
    """Announce that the Chainlink ticker client has connected to Discord."""
    print("link started")
'''
@tasks.loop() can be changed to seconds, minutes, hours
https://discordpy.readthedocs.io/en/latest/ext/tasks/
'''
@tasks.loop(seconds=5)
async def called_second():
    """Refresh every ticker bot's colour role, nickname, and presence.

    Runs every 5 seconds: scrapes the latest quotes once via
    ``wrangle_data()``, then pushes the result to each bot in every guild
    it has joined.  A falsy entry in the scraped dict means "no data" and
    that bot is skipped with a log line.

    NOTE(review): assumes each ticker dict has a numeric ``'last'`` and a
    string ``'change%'`` like ``"-0.42%"`` -- confirm in wrangle_data.
    """
    data = wrangle_data()
    print(data)

    async def _refresh(bot, ticker, nick_prefix, label, empty_msg):
        # Update one bot: RED role when the change% contains '-', GREEN
        # otherwise; nickname = formatted last price; presence = label + %.
        if not ticker:
            print(empty_msg)
            return
        last = '{:20,.2f}'.format(ticker['last'])
        change = ticker['change%']
        if label is None:
            # silver derives its presence label from the scraped name.
            label = ticker['name'].upper()
        for guild in (bot.get_guild(g.id) for g in bot.guilds):
            try:
                red = get(guild.roles, name='RED')
                green = get(guild.roles, name='GREEN')
                me = guild.me
                if '-' in change:
                    await me.remove_roles(green)
                    await me.add_roles(red)
                else:
                    await me.remove_roles(red)
                    await me.add_roles(green)
                await me.edit(nick=f"{nick_prefix} {last}")
                # change_presence is bot-global; kept inside the guild loop
                # to match the original behaviour exactly.
                await bot.change_presence(activity=discord.Activity(
                    type=discord.ActivityType.watching,
                    name=f"{label} {change}"))
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C / SystemExit
                # still propagate; a missing role or permission just logs.
                print(f'broke in {guild}')

    await _refresh(es_bot, data['es'], '1)', 'ES', 'no es data')
    await _refresh(nas_bot, data['nas'], '2)', 'NQ', 'no nas data')
    await _refresh(dow_bot, data['dow'], '3)', 'DJI', 'no dow data')
    # BUG FIX: the original gated this branch on the login token `vix`
    # (always truthy) instead of the scraped data, so a missing VIX quote
    # raised inside the loop instead of printing the no-data message.
    await _refresh(vix_bot, data['vix'], '4)', 'VIX', 'no vix data ')
    await _refresh(dollar_bot, data['dxy'], '5)', 'DXY', 'no dollar data')
    # TODO(review): '4)' duplicates VIX's nick prefix -- looks like a
    # copy-paste slip in the original; preserved to avoid surprise renames.
    await _refresh(us10y_bot, data['us10y'], '4)', 'US10Y', 'no us10y data')
    await _refresh(silver_bot, data['silver'], '6)', None, 'no silver data')
    await _refresh(btc_bot, data['btc'], '7)', 'BTC', 'no data for btc')
    await _refresh(eth_bot, data['eth'], '8)', 'ETH', 'nodata for eth')
    # BUG FIX: the link branch printed 'nodata for eth' (copy-paste).
    await _refresh(link_bot, data['link'], '9)', 'LINK', 'no data for link')
    print(f'updated ')
@called_second.before_loop
async def before():
    """Hold the ticker loop until every client has finished logging in."""
    clients = (es_bot, nas_bot, dow_bot, vix_bot, us10y_bot,
               dollar_bot, silver_bot, btc_bot, eth_bot, link_bot)
    for client in clients:
        await client.wait_until_ready()
    print("Finished waiting")

# Arm the 5-second refresh loop; it starts running once before() returns.
called_second.start()
async def create_bots():
    """Log every client in concurrently on the shared event loop.

    All logins are scheduled first, then awaited in order; each
    ``Client.start`` task only completes when that client disconnects.
    """
    pairs = [
        (es_bot, es), (nas_bot, nas), (dow_bot, dow), (vix_bot, vix),
        (us10y_bot, us10y), (dollar_bot, dollar), (silver_bot, silver),
        (btc_bot, btc), (eth_bot, eth), (link_bot, link),
    ]
    pending = [loop.create_task(client.start(token)) for client, token in pairs]
    for task in pending:
        await task

loop.run_until_complete(create_bots())
|
normal
|
{
"blob_id": "e57109f1c5c2e1468ef1cf9f10fba743633ca150",
"index": 8094,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@es_bot.event\nasync def on_ready():\n print('es started')\n\n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n\n\n@link_bot.event\nasync def on_ready():\n print('link started')\n\n\n<mask token>\n\n\[email protected](seconds=5)\nasync def called_second():\n data = wrangle_data()\n print(data)\n ticker_es = data['es']\n ticker_nas = data['nas']\n ticker_dow = data['dow']\n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds]\n name_es = '{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'1) {name_es}')\n await es_bot.change_presence(activity=discord.Activity(type\n =discord.ActivityType.watching, name=f'ES {watching_es}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n if ticker_nas:\n guild_ids 
= [guild.id for guild in nas_bot.guilds]\n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas = ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'2) {name_nas}')\n await nas_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'NQ {watching_nas}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no nas data')\n if ticker_dow:\n guild_ids = [guild.id for guild in dow_bot.guilds]\n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dow:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'3) {name_dow}')\n await dow_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DJI {watching_dow}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds]\n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel 
in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_vix}')\n await vix_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'VIX {watching_vix}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds]\n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dollar:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'5) {name_dollar}')\n await dollar_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DXY {watching_dollar}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds]\n name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_us10y:\n discord_bot = 
guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_us10y}')\n await us10y_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'US10Y {watching_us10y}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds]\n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'6) {name_silver}')\n await silver_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f\"{ticker_silver['name'].upper()} {watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds]\n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'7) {name_btc}')\n await btc_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'BTC {watching_btc}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds]\n name_eth = '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'8) {name_eth}')\n await eth_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'ETH {watching_eth}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds]\n name_link = '{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_link:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'9) {name_link}')\n await link_bot.change_presence(activity=discord.Activity(\n 
type=discord.ActivityType.watching, name=\n f'LINK {watching_link}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready()\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n print('Finished waiting')\n\n\ncalled_second.start()\n\n\nasync def create_bots():\n es_task = loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n us10y_task = loop.create_task(us10y_bot.start(us10y))\n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = loop.create_task(silver_bot.start(silver))\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n await es_task\n await nas_task\n await dow_task\n await vix_task\n await us10y_task\n await dollar_task\n await silver_task\n await btc_task\n await eth_task\n await link_task\n\n\nloop.run_until_complete(create_bots())\n",
"step-3": "<mask token>\nes_bot = discord.Client()\nnas_bot = discord.Client()\ndow_bot = discord.Client()\nus10y_bot = discord.Client()\nvix_bot = discord.Client()\nticker_vix = discord.Client()\ndollar_bot = discord.Client()\nsilver_bot = discord.Client()\nbtc_bot = discord.Client()\neth_bot = discord.Client()\nlink_bot = discord.Client()\nloop = asyncio.get_event_loop()\n\n\n@es_bot.event\nasync def on_ready():\n print('es started')\n\n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n\n\n@link_bot.event\nasync def on_ready():\n print('link started')\n\n\n<mask token>\n\n\[email protected](seconds=5)\nasync def called_second():\n data = wrangle_data()\n print(data)\n ticker_es = data['es']\n ticker_nas = data['nas']\n ticker_dow = data['dow']\n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds]\n name_es = '{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n 
await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'1) {name_es}')\n await es_bot.change_presence(activity=discord.Activity(type\n =discord.ActivityType.watching, name=f'ES {watching_es}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n if ticker_nas:\n guild_ids = [guild.id for guild in nas_bot.guilds]\n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas = ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'2) {name_nas}')\n await nas_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'NQ {watching_nas}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no nas data')\n if ticker_dow:\n guild_ids = [guild.id for guild in dow_bot.guilds]\n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dow:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'3) {name_dow}')\n await dow_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n 
f'DJI {watching_dow}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds]\n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_vix}')\n await vix_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'VIX {watching_vix}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds]\n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dollar:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'5) {name_dollar}')\n await dollar_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DXY {watching_dollar}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds]\n name_us10y = 
'{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_us10y:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_us10y}')\n await us10y_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'US10Y {watching_us10y}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds]\n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'6) {name_silver}')\n await silver_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f\"{ticker_silver['name'].upper()} {watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds]\n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for 
guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'7) {name_btc}')\n await btc_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'BTC {watching_btc}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds]\n name_eth = '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'8) {name_eth}')\n await eth_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'ETH {watching_eth}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds]\n name_link = '{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_link:\n discord_bot = 
guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'9) {name_link}')\n await link_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'LINK {watching_link}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready()\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n print('Finished waiting')\n\n\ncalled_second.start()\n\n\nasync def create_bots():\n es_task = loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n us10y_task = loop.create_task(us10y_bot.start(us10y))\n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = loop.create_task(silver_bot.start(silver))\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n await es_task\n await nas_task\n await dow_task\n await vix_task\n await us10y_task\n await dollar_task\n await silver_task\n await btc_task\n await eth_task\n await link_task\n\n\nloop.run_until_complete(create_bots())\n",
"step-4": "from discord.ext import commands, tasks\nfrom discord.utils import get\nimport discord\nimport re\nimport json\nimport time\nimport random\nimport asyncio\nimport os\nimport datetime\nfrom live_ticker_scrape import wrangle_data\nfrom tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver, link\nes_bot = discord.Client()\nnas_bot = discord.Client()\ndow_bot = discord.Client()\nus10y_bot = discord.Client()\nvix_bot = discord.Client()\nticker_vix = discord.Client()\ndollar_bot = discord.Client()\nsilver_bot = discord.Client()\nbtc_bot = discord.Client()\neth_bot = discord.Client()\nlink_bot = discord.Client()\nloop = asyncio.get_event_loop()\n\n\n@es_bot.event\nasync def on_ready():\n print('es started')\n\n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n\n\n@link_bot.event\nasync def on_ready():\n print('link started')\n\n\n<mask token>\n\n\[email protected](seconds=5)\nasync def called_second():\n data = wrangle_data()\n print(data)\n ticker_es = data['es']\n ticker_nas = data['nas']\n ticker_dow = data['dow']\n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds]\n name_es = '{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for 
guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'1) {name_es}')\n await es_bot.change_presence(activity=discord.Activity(type\n =discord.ActivityType.watching, name=f'ES {watching_es}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n if ticker_nas:\n guild_ids = [guild.id for guild in nas_bot.guilds]\n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas = ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'2) {name_nas}')\n await nas_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'NQ {watching_nas}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no nas data')\n if ticker_dow:\n guild_ids = [guild.id for guild in dow_bot.guilds]\n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dow:\n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'3) {name_dow}')\n await dow_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DJI {watching_dow}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds]\n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_vix}')\n await vix_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'VIX {watching_vix}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds]\n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_dollar:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await 
guild_channel.me.edit(nick=f'5) {name_dollar}')\n await dollar_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'DXY {watching_dollar}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds]\n name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_us10y:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'4) {name_us10y}')\n await us10y_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'US10Y {watching_us10y}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds]\n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'6) {name_silver}')\n await silver_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n 
f\"{ticker_silver['name'].upper()} {watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds]\n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'7) {name_btc}')\n await btc_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'BTC {watching_btc}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds]\n name_eth = '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids\n ]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'8) {name_eth}')\n await eth_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'ETH {watching_eth}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds]\n 
name_link = '{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in\n guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if '-' in watching_link:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f'9) {name_link}')\n await link_bot.change_presence(activity=discord.Activity(\n type=discord.ActivityType.watching, name=\n f'LINK {watching_link}'))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready()\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n print('Finished waiting')\n\n\ncalled_second.start()\n\n\nasync def create_bots():\n es_task = loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n us10y_task = loop.create_task(us10y_bot.start(us10y))\n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = loop.create_task(silver_bot.start(silver))\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n await es_task\n await nas_task\n await dow_task\n await vix_task\n await us10y_task\n await dollar_task\n await silver_task\n 
await btc_task\n await eth_task\n await link_task\n\n\nloop.run_until_complete(create_bots())\n",
"step-5": "from discord.ext import commands, tasks\nfrom discord.utils import get\nimport discord\nimport re\nimport json \nimport time \nimport random\nimport asyncio\nimport os\nimport datetime\n\nfrom live_ticker_scrape import wrangle_data\nfrom tokens import dev, dev1, es, nas, dow, us10y, dollar, vix, btc, eth, silver , link\n\nes_bot = discord.Client()\nnas_bot = discord.Client()\ndow_bot = discord.Client()\n\n\nus10y_bot = discord.Client()\nvix_bot = discord.Client()\nticker_vix = discord.Client()\n\n\ndollar_bot = discord.Client()\nsilver_bot = discord.Client()\n\nbtc_bot = discord.Client()\neth_bot= discord.Client()\nlink_bot = discord.Client()\n\nloop = asyncio.get_event_loop()\n\n@es_bot.event\nasync def on_ready():\n print('es started') \n\n@nas_bot.event\nasync def on_ready():\n print('nas started')\n\n@dow_bot.event\nasync def on_ready():\n print('dow started')\n\n@silver_bot.event\nasync def on_ready():\n print('silver started')\n\n@us10y_bot.event\nasync def on_ready():\n print('us10y started')\n\n@dollar_bot.event\nasync def on_Ready():\n print('dollar started')\n\n@vix_bot.event\nasync def on_ready():\n print('vix started')\n\n@btc_bot.event\nasync def on_ready():\n print('btc started')\n\n@eth_bot.event\nasync def on_ready():\n print('eth started')\n@link_bot.event\nasync def on_ready():\n print('link started')\n \n'''\[email protected]() can be changed to seconds, minutes, hours\nhttps://discordpy.readthedocs.io/en/latest/ext/tasks/\n'''\n\[email protected](seconds=5)\nasync def called_second():\n ## get all guild ids that the bot is joined in \n\n\n data = wrangle_data()\n print(data)\n\n ticker_es = data['es']\n ticker_nas = data['nas'] \n ticker_dow = data['dow'] \n ticker_vix = data['vix']\n ticker_us10y = data['us10y']\n ticker_dollar = data['dxy']\n ticker_silver = data['silver']\n ticker_btc = data['btc']\n ticker_eth = data['eth']\n ticker_link = data['link']\n ## es\n if ticker_es:\n guild_ids = [guild.id for guild in es_bot.guilds] \n 
name_es = '{:20,.2f}'.format(ticker_es['last'])\n watching_es = ticker_es['change%']\n guild_channels = [es_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_es:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"1) {name_es}\") \n await es_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"ES {watching_es}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no es data')\n ##nas\n if ticker_nas:\n guild_ids = [guild.id for guild in nas_bot.guilds] \n name_nas = '{:20,.2f}'.format(ticker_nas['last'])\n watching_nas= ticker_nas['change%']\n guild_channels = [nas_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_nas:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"2) {name_nas}\")\n await nas_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"NQ {watching_nas}\"))\n except:\n print(f'broke in {guild_channel}')\n else: \n print('no nas data')\n ## dow\n if ticker_dow: \n guild_ids = [guild.id for guild in dow_bot.guilds] \n name_dow = '{:20,.2f}'.format(ticker_dow['last'])\n watching_dow = ticker_dow['change%']\n\n guild_channels = [dow_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n 
red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_dow:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"3) {name_dow}\")\n await dow_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"DJI {watching_dow}\"))\n\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dow data')\n\n ## vix \n if vix:\n guild_ids = [guild.id for guild in vix_bot.guilds] \n name_vix = '{:20,.2f}'.format(ticker_vix['last'])\n watching_vix = ticker_vix['change%']\n\n guild_channels = [vix_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_vix:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"4) {name_vix}\")\n await vix_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"VIX {watching_vix}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no vix data ')\n\n # dollar \n if ticker_dollar:\n guild_ids = [guild.id for guild in dollar_bot.guilds] \n name_dollar = '{:20,.2f}'.format(ticker_dollar['last'])\n watching_dollar = ticker_dollar['change%']\n guild_channels = [dollar_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_dollar:\n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"5) {name_dollar}\")\n await dollar_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"DXY {watching_dollar}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no dollar data')\n # us10y \n if ticker_us10y:\n guild_ids = [guild.id for guild in us10y_bot.guilds] \n name_us10y = '{:20,.2f}'.format(ticker_us10y['last'])\n watching_us10y = ticker_us10y['change%']\n guild_channels = [us10y_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_us10y:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n\n await guild_channel.me.edit(nick=f\"4) {name_us10y}\")\n await us10y_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"US10Y {watching_us10y}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no us10y data')\n\n # silver \n if ticker_silver:\n guild_ids = [guild.id for guild in silver_bot.guilds] \n name_silver = '{:20,.2f}'.format(ticker_silver['last'])\n watching_silver = ticker_silver['change%']\n \n guild_channels = [silver_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_silver:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await 
discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"6) {name_silver}\")\n await silver_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"{ticker_silver['name'].upper()} {watching_silver}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no silver data')\n #shit coin stuff\n # btc\n if ticker_btc:\n guild_ids = [guild.id for guild in btc_bot.guilds] \n name_btc = '{:20,.2f}'.format(ticker_btc['last'])\n watching_btc = ticker_btc['change%']\n guild_channels = [btc_bot.get_guild(guild_id) for guild_id in guild_ids]\n\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_btc:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"7) {name_btc}\")\n await btc_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"BTC {watching_btc}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('no data for btc')\n # eth \n if ticker_eth:\n guild_ids = [guild.id for guild in eth_bot.guilds] \n name_eth= '{:20,.2f}'.format(ticker_eth['last'])\n watching_eth = ticker_eth['change%']\n guild_channels = [eth_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_eth:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"8) {name_eth}\")\n await 
eth_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"ETH {watching_eth}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n # link\n if ticker_link:\n guild_ids = [guild.id for guild in link_bot.guilds] \n name_link = '{:20,.2f}'.format(ticker_link['last'])\n watching_link = ticker_link['change%']\n guild_channels = [link_bot.get_guild(guild_id) for guild_id in guild_ids]\n for guild_channel in guild_channels:\n try:\n red = get(guild_channel.roles, name='RED')\n green = get(guild_channel.roles, name='GREEN')\n if \"-\" in watching_link:\n discord_bot = guild_channel.me\n await discord_bot.remove_roles(green)\n await discord_bot.add_roles(red)\n else: \n discord_bot = guild_channel.me\n await discord_bot.remove_roles(red)\n await discord_bot.add_roles(green)\n await guild_channel.me.edit(nick=f\"9) {name_link}\")\n await link_bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"LINK {watching_link}\"))\n except:\n print(f'broke in {guild_channel}')\n else:\n print('nodata for eth')\n print(f'updated ')\n\n@called_second.before_loop\nasync def before():\n await es_bot.wait_until_ready()\n await nas_bot.wait_until_ready()\n await dow_bot.wait_until_ready()\n await vix_bot.wait_until_ready()\n\n await us10y_bot.wait_until_ready()\n await dollar_bot.wait_until_ready()\n await silver_bot.wait_until_ready() \n\n await btc_bot.wait_until_ready()\n await eth_bot.wait_until_ready()\n await link_bot.wait_until_ready()\n\n print(\"Finished waiting\")\n\ncalled_second.start()\n\nasync def create_bots():\n es_task= loop.create_task(es_bot.start(es))\n nas_task = loop.create_task(nas_bot.start(nas))\n dow_task = loop.create_task(dow_bot.start(dow))\n vix_task = loop.create_task(vix_bot.start(vix))\n\n us10y_task = loop.create_task(us10y_bot.start(us10y)) \n dollar_task = loop.create_task(dollar_bot.start(dollar))\n silver_task = 
loop.create_task(silver_bot.start(silver))\n\n btc_task = loop.create_task(btc_bot.start(btc))\n eth_task = loop.create_task(eth_bot.start(eth))\n link_task = loop.create_task(link_bot.start(link))\n \n await es_task \n await nas_task\n await dow_task\n await vix_task\n\n await us10y_task\n await dollar_task \n await silver_task\n\n await btc_task \n await eth_task\n await link_task \n\nloop.run_until_complete(create_bots())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(X):
    """Apply the logistic function 1 / (1 + e^-x) element-wise.

    The input is clipped to the float64-safe range before exponentiation:
    np.exp overflows for arguments above ~709.78, so without clipping a
    strongly negative logit triggers an overflow warning and can produce
    inf in the denominator.

    Args:
        X: scalar or numpy array of logits

    Returns:
        Value(s) in (0, 1) with the same shape as X.
    """
    # 709 is just below log(float64 max); outputs saturate to 0.0 / 1.0.
    z = np.clip(X, -709.0, 709.0)
    return 1 / (1 + np.exp(-z))
def x_strich(X):
    """Return the design matrix X with a leading bias column of ones."""
    X = np.asarray(X)
    if X.ndim == 1:
        # Treat a 1-D feature vector as a single-feature column.
        X = X[:, None]
    bias = np.ones((X.shape[0], 1))
    return np.concatenate([bias, X], axis=1)
def feature_scaling(X):
    """Standardize features to zero mean and unit variance (z-score).

    Args:
        X: feature array with samples along axis 0

    Returns:
        Tuple (X_scaled, mean, std) where mean and std are the
        per-column statistics used for the transformation.
    """
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    scaled = (X - mean) / std
    return scaled, mean, std
def rescale_model(thetas, mean, std):
    """Convert parameters learned on z-scored features back to raw scale.

    A model fitted on x_scaled = (x - mean) / std satisfies
        theta0 + theta1 * (x - mean) / std
      = (theta0 - theta1 * mean / std) + (theta1 / std) * x,
    so the intercept absorbs -theta1 * mean / std and every slope is
    divided by its feature's std.

    The previous implementation added (instead of subtracting) the mean
    correction and left all slope entries at zero; both are fixed here.

    Args:
        thetas: parameter vector [intercept, slope_1, ..., slope_n]
        mean: per-feature mean(s) used during scaling
        std: per-feature standard deviation(s) used during scaling

    Returns:
        numpy array of parameters applicable to the unscaled features.
    """
    thetas = np.asarray(thetas, dtype=float)
    thetas_rescaled = np.zeros_like(thetas)
    # Undo the division by std applied to each feature.
    thetas_rescaled[1:] = thetas[1:] / std
    # Intercept absorbs the mean shift of every scaled feature.
    thetas_rescaled[0] = thetas[0] - np.sum(thetas[1:] * mean / std)
    return thetas_rescaled
def logistic_hypothesis(theta):
    """Build the logistic model h(X) = sigmoid(X' @ theta) as a callable.

    Args:
        theta: coefficient vector, intercept first

    Returns:
        Function mapping a feature matrix X (without bias column) to the
        predicted probability for each row.
    """
    def hypothesis(X):
        logits = x_strich(X).dot(theta)
        return sigmoid(logits)

    return hypothesis
# def regulated_cost(X, y, theta, lambda_reg):
#
# return cross_entropy(X, y)(theta) + L2_regularization_cost(X, theta, lambda_reg)
# def cross_entropy(X, y):
# """
# Computes the cross-entropy for a single logit value and a given target class.
# Parameters
# ----------
# X : float64 or float32
# The logit
# y : int
# The target class
# Returns
# -------
# floatX
# The cross entropy value (negative log-likelihood)
# """
#
# def cost(theta):
# z = x_strich(X).dot(theta)
# mu = np.max([np.zeros(X.shape[0]), -z], axis=0)
# r1 = y * (mu + np.log(np.exp(-mu) + np.exp(-z - mu)))
# mu = np.max([np.zeros(X.shape[0]), z], axis=0)
# r2 = (1 - y) * (mu + np.log(np.exp(-mu) + np.exp(z - mu)))
# return r1 + r2
#
# return cost
def cross_entropy(X, y):
    """Build the per-example cross-entropy loss as a function of theta.

    Args:
        X: features as 2D array with shape (m_examples, n_features)
        y: binary ground-truth labels with shape (m_examples,)

    Returns:
        costs(theta) returning the elementwise negative log-likelihood.
        A small epsilon guards both logarithms against log(0).
    """
    eps = 1e-09

    def costs(theta):
        h = logistic_hypothesis(theta)(X)
        pos_term = y * np.log(h + eps)
        neg_term = (1 - y) * np.log(1 - h + eps)
        return -(pos_term + neg_term)

    return costs
def compute_new_theta(X, y, theta, learning_rate, lambda_reg):
    """Perform one gradient-descent step with L2 weight decay.

    Implements
        theta <- theta * (1 - lr * lambda / m) - (lr / m) * X'.T @ (h(X) - y)
    where X' is X with a prepended bias column and h is the logistic
    hypothesis for the current theta.

    NOTE(review): the decay factor is applied to ALL parameters including
    the intercept theta[0]; classical L2 regularization usually exempts
    the intercept — confirm this is intended.

    The dead assignment ``thetas = np.zeros(len(theta))`` from the
    original (immediately overwritten) has been removed.

    Args:
        X: 2D numpy array of feature values
        y: target labels corresponding to the rows of X
        theta: current parameter vector (intercept first)
        learning_rate: step size scaling the negative gradient
        lambda_reg: L2 regularization strength

    Returns:
        Updated parameter vector (a new array; theta is not mutated).
    """
    m = len(X)
    errors = logistic_hypothesis(theta)(X) - y  # shape (m,)
    gradient = np.sum(errors * x_strich(X).T, axis=1)  # shape (n+1,)
    decay = 1 - learning_rate * (lambda_reg / m)
    return theta * decay - (learning_rate / m) * gradient
def L2_regularization_cost(X, theta, lambda_reg):
    """Return the L2 penalty lambda / (2m) * sum(theta_j ** 2).

    NOTE(review): the penalty includes the intercept theta[0].
    """
    m = len(X)
    scale = lambda_reg / (2 * m)
    return scale * np.sum(np.square(theta))
def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):
    """Run regularized gradient descent on the cross-entropy objective.

    Args:
        X: 2D numpy array of feature values
        y: labels corresponding to the rows of X
        theta: initial parameter vector
        learning_rate: step size for each update
        num_iters: total number of iterations (index 0 is the start state)
        lambda_reg: L2 regularization strength

    Returns:
        Tuple (history_cost, history_theta): the cost after each
        iteration and the parameter vector after each iteration.
    """
    cost_fn = mean_cross_entropy_costs(X, y, lambda_reg)
    history_theta = [theta]
    history_cost = np.zeros(num_iters)
    history_cost[0] = cost_fn(theta)
    for step in range(1, num_iters):
        updated = compute_new_theta(
            X, y, history_theta[-1], learning_rate, lambda_reg
        )
        history_theta.append(updated)
        history_cost[step] = cost_fn(updated)
    return history_cost, history_theta
def mean_cross_entropy_costs(X, y, lambda_reg=0.0):
    """Build the regularized mean cross-entropy J(theta) as a callable.

    Args:
        X: features as 2D array with shape (m_examples, n_features)
        y: ground-truth labels with shape (m_examples,)
        lambda_reg: L2 regularization strength (0 disables the penalty)

    Returns:
        J(theta) = mean per-example cross-entropy plus the L2 penalty.
    """
    per_example = cross_entropy(X, y)

    def J(theta):
        data_cost = np.mean(per_example(theta))
        reg_cost = L2_regularization_cost(X, theta, lambda_reg)
        return data_cost + reg_cost

    return J
def plot_progress(fig, costs, learning_rate, lambda_reg):
    """Plot the cost history over the iterations on the given figure.

    Args:
        fig: matplotlib figure to draw into
        costs: sequence of cost values, one per iteration
        learning_rate: learning rate shown in the legend label
        lambda_reg: regularization strength shown in the legend label
    """
    label = "LR: " + str(learning_rate) + " __ Lambda: " + str(lambda_reg)
    iterations = np.arange(len(costs))
    ax = fig.add_subplot(111)
    ax.plot(iterations, costs, alpha=0.8, label=label)
    ax.legend(
        bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),
        loc="best",
        ncol=4,
        mode="expand",
        borderaxespad=0.0,
    )
|
normal
|
{
"blob_id": "36e7398f576aa1d298a20b4d4a27a7b93e3bd992",
"index": 5482,
"step-1": "<mask token>\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n return np.column_stack((np.ones(len(X)), X))\n\n\n<mask token>\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\n<mask token>\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (\n 1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)\n\n\n<mask token>\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\n<mask token>\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)\n ) + L2_regularization_cost(X, theta, lambda_reg)\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(\n learning_rate) + ' __ Lambda: ' + str(lambda_reg))\n ax.legend(bbox_to_anchor=(0.0, 
1.02, 1.0, 0.102), loc='best', ncol=4,\n mode='expand', borderaxespad=0.0)\n",
"step-2": "<mask token>\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n return np.column_stack((np.ones(len(X)), X))\n\n\n<mask token>\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\n<mask token>\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (\n 1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)\n\n\ndef compute_new_theta(X, y, theta, learning_rate, lambda_reg):\n \"\"\"Updates learnable parameters theta\n The update is done by calculating the partial derivities of\n the cost function including the linear hypothesis. 
The\n gradients scaled by a scalar are subtracted from the given\n theta values.\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n hypothesis: the hypothesis as function\n Returns:\n theta: Updated theta_0\n \"\"\"\n thetas = np.zeros(len(theta))\n thetas = theta * (1 - learning_rate * (lambda_reg / len(X))\n ) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -\n y) * x_strich(X).T, axis=1)\n return thetas\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\ndef gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n \"\"\"Minimize theta values of a logistic model based on cross-entropy cost function\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n num_iters: number of iterations updating thetas\n lambda_reg: regularization strength\n Returns:\n history_cost: cost after each iteration\n history_theta: Updated theta values after each iteration\n \"\"\"\n thetas = [theta]\n cost = np.zeros(num_iters)\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,\n lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)\n ) + L2_regularization_cost(X, 
theta, lambda_reg)\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(\n learning_rate) + ' __ Lambda: ' + str(lambda_reg))\n ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,\n mode='expand', borderaxespad=0.0)\n",
"step-3": "<mask token>\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n return np.column_stack((np.ones(len(X)), X))\n\n\n<mask token>\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\ndef logistic_hypothesis(theta):\n \"\"\"Combines given list argument in a logistic equation and returns it as a\n function\n Args:\n thetas: list of coefficients\n Returns:\n lambda that models a logistc function based on thetas and x\n \"\"\"\n return lambda X: sigmoid(np.dot(x_strich(X), theta))\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (\n 1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)\n\n\ndef compute_new_theta(X, y, theta, learning_rate, lambda_reg):\n \"\"\"Updates learnable parameters theta\n The update is done by calculating the partial derivities of\n the cost function including the linear hypothesis. 
The\n gradients scaled by a scalar are subtracted from the given\n theta values.\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n hypothesis: the hypothesis as function\n Returns:\n theta: Updated theta_0\n \"\"\"\n thetas = np.zeros(len(theta))\n thetas = theta * (1 - learning_rate * (lambda_reg / len(X))\n ) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -\n y) * x_strich(X).T, axis=1)\n return thetas\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\ndef gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n \"\"\"Minimize theta values of a logistic model based on cross-entropy cost function\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n num_iters: number of iterations updating thetas\n lambda_reg: regularization strength\n Returns:\n history_cost: cost after each iteration\n history_theta: Updated theta values after each iteration\n \"\"\"\n thetas = [theta]\n cost = np.zeros(num_iters)\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,\n lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)\n ) + L2_regularization_cost(X, 
theta, lambda_reg)\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(\n learning_rate) + ' __ Lambda: ' + str(lambda_reg))\n ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,\n mode='expand', borderaxespad=0.0)\n",
"step-4": "<mask token>\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n return np.column_stack((np.ones(len(X)), X))\n\n\ndef feature_scaling(X):\n x_mean = np.mean(X, axis=0)\n x_std = np.std(X, axis=0)\n return (X - x_mean) / x_std, x_mean, x_std\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\ndef logistic_hypothesis(theta):\n \"\"\"Combines given list argument in a logistic equation and returns it as a\n function\n Args:\n thetas: list of coefficients\n Returns:\n lambda that models a logistc function based on thetas and x\n \"\"\"\n return lambda X: sigmoid(np.dot(x_strich(X), theta))\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (\n 1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)\n\n\ndef compute_new_theta(X, y, theta, learning_rate, lambda_reg):\n \"\"\"Updates learnable parameters theta\n The update is done by calculating the partial derivities of\n the cost function including the linear hypothesis. 
The\n gradients scaled by a scalar are subtracted from the given\n theta values.\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n hypothesis: the hypothesis as function\n Returns:\n theta: Updated theta_0\n \"\"\"\n thetas = np.zeros(len(theta))\n thetas = theta * (1 - learning_rate * (lambda_reg / len(X))\n ) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -\n y) * x_strich(X).T, axis=1)\n return thetas\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\ndef gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n \"\"\"Minimize theta values of a logistic model based on cross-entropy cost function\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n num_iters: number of iterations updating thetas\n lambda_reg: regularization strength\n Returns:\n history_cost: cost after each iteration\n history_theta: Updated theta values after each iteration\n \"\"\"\n thetas = [theta]\n cost = np.zeros(num_iters)\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,\n lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)\n ) + L2_regularization_cost(X, 
theta, lambda_reg)\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(\n learning_rate) + ' __ Lambda: ' + str(lambda_reg))\n ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,\n mode='expand', borderaxespad=0.0)\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n\n return np.column_stack((np.ones(len(X)), X))\n\n\ndef feature_scaling(X):\n x_mean = np.mean(X, axis=0)\n x_std = np.std(X, axis=0)\n\n return (X - x_mean) / x_std, x_mean, x_std\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\ndef logistic_hypothesis(theta):\n \"\"\"Combines given list argument in a logistic equation and returns it as a\n function\n Args:\n thetas: list of coefficients\n Returns:\n lambda that models a logistc function based on thetas and x\n \"\"\"\n return lambda X: sigmoid(np.dot(x_strich(X), theta))\n\n\n# def regulated_cost(X, y, theta, lambda_reg):\n#\n# return cross_entropy(X, y)(theta) + L2_regularization_cost(X, theta, lambda_reg)\n\n\n# def cross_entropy(X, y):\n# \"\"\"\n# Computes the cross-entropy for a single logit value and a given target class.\n# Parameters\n# ----------\n# X : float64 or float32\n# The logit\n# y : int\n# The target class\n# Returns\n# -------\n# floatX\n# The cross entropy value (negative log-likelihood)\n# \"\"\"\n#\n# def cost(theta):\n# z = x_strich(X).dot(theta)\n# mu = np.max([np.zeros(X.shape[0]), -z], axis=0)\n# r1 = y * (mu + np.log(np.exp(-mu) + np.exp(-z - mu)))\n# mu = np.max([np.zeros(X.shape[0]), z], axis=0)\n# r2 = (1 - y) * (mu + np.log(np.exp(-mu) + np.exp(z - mu)))\n# return r1 + r2\n#\n# return cost\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) 
that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)\n\n\ndef compute_new_theta(X, y, theta, learning_rate, lambda_reg):\n \"\"\"Updates learnable parameters theta\n The update is done by calculating the partial derivities of\n the cost function including the linear hypothesis. The\n gradients scaled by a scalar are subtracted from the given\n theta values.\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n hypothesis: the hypothesis as function\n Returns:\n theta: Updated theta_0\n \"\"\"\n\n thetas = np.zeros(len(theta))\n thetas = theta * (1 - learning_rate * (lambda_reg / len(X))) - (\n learning_rate / len(X)\n ) * np.sum((logistic_hypothesis(theta)(X) - y) * x_strich(X).T, axis=1)\n\n return thetas\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\ndef gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n \"\"\"Minimize theta values of a logistic model based on cross-entropy cost function\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n num_iters: number of iterations updating thetas\n lambda_reg: regularization strength\n Returns:\n history_cost: cost after each iteration\n history_theta: Updated theta values after each iteration\n \"\"\"\n thetas = [theta]\n cost = np.zeros(num_iters)\n\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate, lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean 
cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)) + L2_regularization_cost(\n X, theta, lambda_reg\n )\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(\n np.arange(len(costs)),\n costs,\n alpha=0.8,\n label=\"LR: \" + str(learning_rate) + \" __ Lambda: \" + str(lambda_reg),\n )\n\n ax.legend(\n bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),\n loc=\"best\",\n ncol=4,\n mode=\"expand\",\n borderaxespad=0.0,\n )",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
def progress_format(user):
    """Map one progress DB row (sequence) onto a JSON-serializable dict.

    Expected row layout: (id, percentage, user_id, technology).
    """
    row_id, percentage, user_id, technology = user[0], user[1], user[2], user[3]
    return {
        "progres_id": row_id,
        "percentage": percentage,
        "user_id": user_id,
        "technology": technology,
    }
def progresses_format(users):
    """Wrap every progress row, formatted via progress_format, in a top-level dict."""
    return {"users_progresses": [progress_format(user) for user in users]}
def progress_percentage_formating(progresses):
    """Shape (percentage, name) rows into {'response': [{'name', 'percentage'}, ...]}."""
    entries = [{"name": row[1], "percentage": row[0]} for row in progresses]
    return {"response": entries}
|
normal
|
{
"blob_id": "6ebf6bdfc6a4a1fe49f4eed1a2c1802f8adeef08",
"index": 1195,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef progresses_format(users):\n json = dict()\n json['users_progresses'] = list()\n for user in users:\n json['users_progresses'].append(progress_format(user))\n return json\n\n\n<mask token>\n",
"step-3": "def progress_format(user):\n json = dict()\n json['progres_id'] = user[0]\n json['percentage'] = user[1]\n json['user_id'] = user[2]\n json['technology'] = user[3]\n return json\n\n\ndef progresses_format(users):\n json = dict()\n json['users_progresses'] = list()\n for user in users:\n json['users_progresses'].append(progress_format(user))\n return json\n\n\n<mask token>\n",
"step-4": "def progress_format(user):\n json = dict()\n json['progres_id'] = user[0]\n json['percentage'] = user[1]\n json['user_id'] = user[2]\n json['technology'] = user[3]\n return json\n\n\ndef progresses_format(users):\n json = dict()\n json['users_progresses'] = list()\n for user in users:\n json['users_progresses'].append(progress_format(user))\n return json\n\n\ndef progress_percentage_formating(progresses):\n response = dict()\n response['response'] = list()\n for progress in progresses:\n json = dict()\n json['name'] = progress[1]\n json['percentage'] = progress[0]\n response['response'].append(json)\n return response\n",
"step-5": "def progress_format(user):\r\n json = dict()\r\n json[\"progres_id\"] = user[0]\r\n json[\"percentage\"] = user[1]\r\n json[\"user_id\"] = user[2]\r\n json[\"technology\"] = user[3]\r\n return json\r\n\r\ndef progresses_format(users):\r\n json = dict()\r\n json[\"users_progresses\"] = list()\r\n for user in users:\r\n json[\"users_progresses\"].append(progress_format(user))\r\n return json\r\n\r\ndef progress_percentage_formating(progresses):\r\n response = dict()\r\n response['response'] = list()\r\n for progress in progresses:\r\n json = dict()\r\n json[\"name\"] = progress[1]\r\n json[\"percentage\"] = progress[0]\r\n response['response'].append(json)\r\n return response",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys

# Read the count of numbers, then track the running extremes while consuming
# the input stream once. Sentinels keep behavior defined even for n == 0.
n = int(input())
min_number = sys.maxsize
max_number = -sys.maxsize
for _ in range(0, n):
    value = int(input())
    max_number = max(max_number, value)
    min_number = min(min_number, value)
print(f"Max number: {max_number}")
print(f"Min number: {min_number}")
|
normal
|
{
"blob_id": "ac6f2287390bdad8fe20cdc73c0063f685970cfb",
"index": 5289,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, n):\n num = int(input())\n if num > max_number:\n max_number = num\n if num < min_number:\n min_number = num\nprint(f'Max number: {max_number}')\nprint(f'Min number: {min_number}')\n",
"step-3": "<mask token>\nn = int(input())\nmin_number = sys.maxsize\nmax_number = -sys.maxsize\nfor i in range(0, n):\n num = int(input())\n if num > max_number:\n max_number = num\n if num < min_number:\n min_number = num\nprint(f'Max number: {max_number}')\nprint(f'Min number: {min_number}')\n",
"step-4": "import sys\nn = int(input())\nmin_number = sys.maxsize\nmax_number = -sys.maxsize\nfor i in range(0, n):\n num = int(input())\n if num > max_number:\n max_number = num\n if num < min_number:\n min_number = num\nprint(f'Max number: {max_number}')\nprint(f'Min number: {min_number}')\n",
"step-5": "import sys\r\n\r\nn = int(input())\r\nmin_number = sys.maxsize\r\nmax_number = -sys.maxsize\r\n\r\nfor i in range(0, n):\r\n num = int(input())\r\n if num > max_number:\r\n max_number = num\r\n\r\n if num < min_number:\r\n min_number = num\r\n\r\nprint(f\"Max number: {max_number}\")\r\nprint(f\"Min number: {min_number}\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
import subprocess
from time import sleep
from flask import render_template, redirect, request, url_for, flash, abort
from dirkules import app, db, scheduler, app_version
import dirkules.manager.serviceManager as servMan
import dirkules.manager.driveManager as driveMan
import dirkules.manager.cleaning as cleaningMan
from dirkules.models import Drive, Cleaning, Pool
import dirkules.manager.viewManager as viewManager
from dirkules.validation.validators import CleaningForm, PoolAddForm
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page, exposing the error description."""
    message = str(e)
    return render_template('404.html', error=message)
@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page, exposing the error description."""
    message = str(e)
    return render_template('500.html', error=message)
@app.route('/', methods=['GET'])
def index():
    """Landing page showing the current state of the dirkules service."""
    state = servMan.service_state()
    return render_template('index.html', service=state)
@app.route('/drives', methods=['GET'])
def drives():
    """List all known drives; an optional ?delete=<id> removes that drive first.

    A successful delete redirects back here (post/redirect/get); malformed or
    unknown ids abort with HTTP 500.
    """
    delete = request.args.get('delete')
    if delete is None:
        return render_template('drives.html', drives=Drive.query.all())
    try:
        driveMan.delete_drive(driveMan.get_drive_by_id(int(delete)))
    except ValueError:
        abort(500, description="Expected int, but got {}.".format(delete))
    except LookupError:
        abort(500, description="Invalid drive id {}".format(delete))
    return redirect(url_for('drives'))
@app.route('/pools', methods=['GET'])
def pools():
    """Overview page listing every storage pool."""
    all_pools = Pool.query.all()
    return render_template('pools.html', pools=all_pools)
@app.route('/pool/<pool>', methods=['GET'])
def pool(pool):
    """Detail page for a single pool; aborts with 404 for an unknown id."""
    found = Pool.query.get(pool)
    if found is None:
        abort(404, description="Pool with ID {} could not be found.".format(pool))
    return render_template('pool.html', pool=found)
@app.route('/pools/add', methods=['GET', 'POST'])
def add_pool():
    """Show the pool-creation form; on a valid POST create a btrfs pool.

    After creation the disk-refresh job is triggered immediately so the new
    pool appears in the next listing, then the user is redirected to /pools.
    """
    form = PoolAddForm(request.form)
    form.drives.choices = viewManager.get_empty_drives()
    if request.method != 'POST' or not form.validate():
        return render_template('pool_add.html', form=form)
    try:
        viewManager.create_btrfs_pool(form)
    except subprocess.CalledProcessError as e:
        abort(500, description="While creating a pool, the following exception occured: {}".format(e))
    except subprocess.TimeoutExpired as e:
        abort(500, description="Pool creation took too long: {}".format(e))
    # Force an immediate rescan so the freshly created pool is picked up.
    scheduler.get_job("refresh_disks").modify(next_run_time=datetime.datetime.now())
    sleep(1)
    return redirect(url_for('pools'))
@app.route('/about', methods=['GET'])
def about():
    """About page displaying the running application version."""
    current_version = app_version
    return render_template('about.html', version=current_version)
@app.route('/partitions/<part>', methods=['GET'])
def partitions(part):
    """List the partitions of the drive identified by <part>.

    Aborts with HTTP 500 when the id is not an int or no such drive exists.
    """
    try:
        selected = driveMan.get_drive_by_id(int(part))
    except ValueError:
        abort(500, description="Expected int, but got {}.".format(part))
    except LookupError:
        abort(500, description="Invalid drive id {}".format(part))
    return render_template('partitions.html', parts=selected.partitions)
@app.route('/cleaning', methods=['GET'])
def cleaning():
    """Cleaning-job overview; query parameters drive mutations.

    Query parameters (all optional):
        remove:      id of a Cleaning job to delete.
        changestate: id of a Cleaning job whose active flag is toggled.
        service:     'start' or 'pause' for the background cleaning service.
    Passing both 'remove' and 'changestate' at once is rejected with a flash.
    Successful mutations redirect back to this page (post/redirect/get);
    failures only flash a message and fall through to the normal listing.
    """
    remove = request.args.get('remove')
    changestate = request.args.get('changestate')
    service = request.args.get('service')
    # Only handle remove/changestate when at most one of them is present.
    if not (remove is not None and changestate is not None):
        if remove is not None:
            try:
                remove = int(remove)
                Cleaning.query.filter(Cleaning.id == remove).delete()
                db.session.commit()
                # Redirect so a browser refresh does not repeat the delete.
                return redirect(request.path, code=302)
            except ValueError:
                flash("Value Error: remove")
        elif changestate is not None:
            try:
                changestate = int(changestate)
                job = Cleaning.query.get(changestate)
                # Toggle between 0 and 1.
                if job.state == 0:
                    job.state = 1
                else:
                    job.state = 0
                db.session.commit()
                return redirect(request.path, code=302)
            except ValueError:
                flash("Value Error: changestate")
    else:
        flash("Value Error: remove and changestate set")
    if service is not None:
        try:
            service = str(service)
            if service == "start":
                if not cleaningMan.running():
                    cleaningMan.enable()
                    return redirect(request.path, code=302)
                else:
                    flash("Error: Cleaning Service already running.")
            elif service == "pause":
                if cleaningMan.running():
                    cleaningMan.disable()
                    return redirect(request.path, code=302)
                else:
                    flash("Error: Cleaning Service already paused.")
            else:
                # Any value other than start/pause is treated as bad input.
                raise ValueError
        except ValueError:
            flash("Value Error: service")
    # Case-insensitive alphabetical listing of all cleaning jobs.
    elements = Cleaning.query.order_by(db.asc(db.collate(Cleaning.name, 'NOCASE'))).all()
    return render_template('cleaning.html', elements=elements, task_running=cleaningMan.running())
@app.route('/add_cleaning', methods=['GET', 'POST'])
def add_cleaning():
    """Form for registering a new cleaning job; valid POST creates it."""
    form = CleaningForm(request.form)
    if request.method != 'POST' or not form.validate():
        return render_template('add_cleaning.html', form=form)
    viewManager.create_cleaning_obj(form.jobname.data, form.path.data, form.active.data)
    return redirect(url_for('cleaning'))
|
normal
|
{
"blob_id": "ab27780b19db6854855af51eea063f07d9eb7302",
"index": 3553,
"step-1": "<mask token>\n\n\[email protected](500)\ndef internal_server_error(e):\n return render_template('500.html', error=str(e))\n\n\n<mask token>\n\n\[email protected]('/pools', methods=['GET'])\ndef pools():\n return render_template('pools.html', pools=Pool.query.all())\n\n\[email protected]('/pool/<pool>', methods=['GET'])\ndef pool(pool):\n db_pool = Pool.query.get(pool)\n if db_pool is None:\n abort(404, description='Pool with ID {} could not be found.'.format\n (pool))\n return render_template('pool.html', pool=db_pool)\n\n\n<mask token>\n\n\[email protected]('/about', methods=['GET'])\ndef about():\n return render_template('about.html', version=app_version)\n\n\[email protected]('/partitions/<part>', methods=['GET'])\ndef partitions(part):\n try:\n drive = driveMan.get_drive_by_id(int(part))\n except ValueError:\n abort(500, description='Expected int, but got {}.'.format(part))\n except LookupError:\n abort(500, description='Invalid drive id {}'.format(part))\n return render_template('partitions.html', parts=drive.partitions)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](404)\ndef page_not_found(e):\n return render_template('404.html', error=str(e))\n\n\[email protected](500)\ndef internal_server_error(e):\n return render_template('500.html', error=str(e))\n\n\n<mask token>\n\n\[email protected]('/drives', methods=['GET'])\ndef drives():\n delete = request.args.get('delete')\n if delete is not None:\n try:\n drive = driveMan.get_drive_by_id(int(delete))\n driveMan.delete_drive(drive)\n except ValueError:\n abort(500, description='Expected int, but got {}.'.format(delete))\n except LookupError:\n abort(500, description='Invalid drive id {}'.format(delete))\n return redirect(url_for('drives'))\n return render_template('drives.html', drives=Drive.query.all())\n\n\[email protected]('/pools', methods=['GET'])\ndef pools():\n return render_template('pools.html', pools=Pool.query.all())\n\n\[email protected]('/pool/<pool>', methods=['GET'])\ndef pool(pool):\n db_pool = Pool.query.get(pool)\n if db_pool is None:\n abort(404, description='Pool with ID {} could not be found.'.format\n (pool))\n return render_template('pool.html', pool=db_pool)\n\n\[email protected]('/pools/add', methods=['GET', 'POST'])\ndef add_pool():\n form = PoolAddForm(request.form)\n form.drives.choices = viewManager.get_empty_drives()\n if request.method == 'POST' and form.validate():\n try:\n viewManager.create_btrfs_pool(form)\n except subprocess.CalledProcessError as e:\n abort(500, description=\n 'While creating a pool, the following exception occured: {}'\n .format(e))\n except subprocess.TimeoutExpired as e:\n abort(500, description='Pool creation took too long: {}'.format(e))\n scheduler.get_job('refresh_disks').modify(next_run_time=datetime.\n datetime.now())\n sleep(1)\n return redirect(url_for('pools'))\n return render_template('pool_add.html', form=form)\n\n\[email protected]('/about', methods=['GET'])\ndef about():\n return render_template('about.html', version=app_version)\n\n\[email 
protected]('/partitions/<part>', methods=['GET'])\ndef partitions(part):\n try:\n drive = driveMan.get_drive_by_id(int(part))\n except ValueError:\n abort(500, description='Expected int, but got {}.'.format(part))\n except LookupError:\n abort(500, description='Invalid drive id {}'.format(part))\n return render_template('partitions.html', parts=drive.partitions)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected](404)\ndef page_not_found(e):\n return render_template('404.html', error=str(e))\n\n\[email protected](500)\ndef internal_server_error(e):\n return render_template('500.html', error=str(e))\n\n\n<mask token>\n\n\[email protected]('/drives', methods=['GET'])\ndef drives():\n delete = request.args.get('delete')\n if delete is not None:\n try:\n drive = driveMan.get_drive_by_id(int(delete))\n driveMan.delete_drive(drive)\n except ValueError:\n abort(500, description='Expected int, but got {}.'.format(delete))\n except LookupError:\n abort(500, description='Invalid drive id {}'.format(delete))\n return redirect(url_for('drives'))\n return render_template('drives.html', drives=Drive.query.all())\n\n\[email protected]('/pools', methods=['GET'])\ndef pools():\n return render_template('pools.html', pools=Pool.query.all())\n\n\[email protected]('/pool/<pool>', methods=['GET'])\ndef pool(pool):\n db_pool = Pool.query.get(pool)\n if db_pool is None:\n abort(404, description='Pool with ID {} could not be found.'.format\n (pool))\n return render_template('pool.html', pool=db_pool)\n\n\[email protected]('/pools/add', methods=['GET', 'POST'])\ndef add_pool():\n form = PoolAddForm(request.form)\n form.drives.choices = viewManager.get_empty_drives()\n if request.method == 'POST' and form.validate():\n try:\n viewManager.create_btrfs_pool(form)\n except subprocess.CalledProcessError as e:\n abort(500, description=\n 'While creating a pool, the following exception occured: {}'\n .format(e))\n except subprocess.TimeoutExpired as e:\n abort(500, description='Pool creation took too long: {}'.format(e))\n scheduler.get_job('refresh_disks').modify(next_run_time=datetime.\n datetime.now())\n sleep(1)\n return redirect(url_for('pools'))\n return render_template('pool_add.html', form=form)\n\n\[email protected]('/about', methods=['GET'])\ndef about():\n return render_template('about.html', version=app_version)\n\n\[email 
protected]('/partitions/<part>', methods=['GET'])\ndef partitions(part):\n try:\n drive = driveMan.get_drive_by_id(int(part))\n except ValueError:\n abort(500, description='Expected int, but got {}.'.format(part))\n except LookupError:\n abort(500, description='Invalid drive id {}'.format(part))\n return render_template('partitions.html', parts=drive.partitions)\n\n\n<mask token>\n\n\[email protected]('/add_cleaning', methods=['GET', 'POST'])\ndef add_cleaning():\n form = CleaningForm(request.form)\n if request.method == 'POST' and form.validate():\n viewManager.create_cleaning_obj(form.jobname.data, form.path.data,\n form.active.data)\n return redirect(url_for('cleaning'))\n return render_template('add_cleaning.html', form=form)\n",
"step-4": "<mask token>\n\n\[email protected](404)\ndef page_not_found(e):\n return render_template('404.html', error=str(e))\n\n\[email protected](500)\ndef internal_server_error(e):\n return render_template('500.html', error=str(e))\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return render_template('index.html', service=servMan.service_state())\n\n\[email protected]('/drives', methods=['GET'])\ndef drives():\n delete = request.args.get('delete')\n if delete is not None:\n try:\n drive = driveMan.get_drive_by_id(int(delete))\n driveMan.delete_drive(drive)\n except ValueError:\n abort(500, description='Expected int, but got {}.'.format(delete))\n except LookupError:\n abort(500, description='Invalid drive id {}'.format(delete))\n return redirect(url_for('drives'))\n return render_template('drives.html', drives=Drive.query.all())\n\n\[email protected]('/pools', methods=['GET'])\ndef pools():\n return render_template('pools.html', pools=Pool.query.all())\n\n\[email protected]('/pool/<pool>', methods=['GET'])\ndef pool(pool):\n db_pool = Pool.query.get(pool)\n if db_pool is None:\n abort(404, description='Pool with ID {} could not be found.'.format\n (pool))\n return render_template('pool.html', pool=db_pool)\n\n\[email protected]('/pools/add', methods=['GET', 'POST'])\ndef add_pool():\n form = PoolAddForm(request.form)\n form.drives.choices = viewManager.get_empty_drives()\n if request.method == 'POST' and form.validate():\n try:\n viewManager.create_btrfs_pool(form)\n except subprocess.CalledProcessError as e:\n abort(500, description=\n 'While creating a pool, the following exception occured: {}'\n .format(e))\n except subprocess.TimeoutExpired as e:\n abort(500, description='Pool creation took too long: {}'.format(e))\n scheduler.get_job('refresh_disks').modify(next_run_time=datetime.\n datetime.now())\n sleep(1)\n return redirect(url_for('pools'))\n return render_template('pool_add.html', form=form)\n\n\[email protected]('/about', 
methods=['GET'])\ndef about():\n return render_template('about.html', version=app_version)\n\n\[email protected]('/partitions/<part>', methods=['GET'])\ndef partitions(part):\n try:\n drive = driveMan.get_drive_by_id(int(part))\n except ValueError:\n abort(500, description='Expected int, but got {}.'.format(part))\n except LookupError:\n abort(500, description='Invalid drive id {}'.format(part))\n return render_template('partitions.html', parts=drive.partitions)\n\n\[email protected]('/cleaning', methods=['GET'])\ndef cleaning():\n remove = request.args.get('remove')\n changestate = request.args.get('changestate')\n service = request.args.get('service')\n if not (remove is not None and changestate is not None):\n if remove is not None:\n try:\n remove = int(remove)\n Cleaning.query.filter(Cleaning.id == remove).delete()\n db.session.commit()\n return redirect(request.path, code=302)\n except ValueError:\n flash('Value Error: remove')\n elif changestate is not None:\n try:\n changestate = int(changestate)\n job = Cleaning.query.get(changestate)\n if job.state == 0:\n job.state = 1\n else:\n job.state = 0\n db.session.commit()\n return redirect(request.path, code=302)\n except ValueError:\n flash('Value Error: changestate')\n else:\n flash('Value Error: remove and changestate set')\n if service is not None:\n try:\n service = str(service)\n if service == 'start':\n if not cleaningMan.running():\n cleaningMan.enable()\n return redirect(request.path, code=302)\n else:\n flash('Error: Cleaning Service already running.')\n elif service == 'pause':\n if cleaningMan.running():\n cleaningMan.disable()\n return redirect(request.path, code=302)\n else:\n flash('Error: Cleaning Service already paused.')\n else:\n raise ValueError\n except ValueError:\n flash('Value Error: service')\n elements = Cleaning.query.order_by(db.asc(db.collate(Cleaning.name,\n 'NOCASE'))).all()\n return render_template('cleaning.html', elements=elements, task_running\n 
=cleaningMan.running())\n\n\[email protected]('/add_cleaning', methods=['GET', 'POST'])\ndef add_cleaning():\n form = CleaningForm(request.form)\n if request.method == 'POST' and form.validate():\n viewManager.create_cleaning_obj(form.jobname.data, form.path.data,\n form.active.data)\n return redirect(url_for('cleaning'))\n return render_template('add_cleaning.html', form=form)\n",
"step-5": "import datetime\nimport subprocess\nfrom time import sleep\nfrom flask import render_template, redirect, request, url_for, flash, abort\nfrom dirkules import app, db, scheduler, app_version\nimport dirkules.manager.serviceManager as servMan\nimport dirkules.manager.driveManager as driveMan\nimport dirkules.manager.cleaning as cleaningMan\nfrom dirkules.models import Drive, Cleaning, Pool\nimport dirkules.manager.viewManager as viewManager\nfrom dirkules.validation.validators import CleaningForm, PoolAddForm\n\n\[email protected](404)\ndef page_not_found(e):\n return render_template('404.html', error=str(e))\n\n\[email protected](500)\ndef internal_server_error(e):\n return render_template('500.html', error=str(e))\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return render_template('index.html', service=servMan.service_state())\n\n\[email protected]('/drives', methods=['GET'])\ndef drives():\n delete = request.args.get('delete')\n if delete is not None:\n try:\n drive = driveMan.get_drive_by_id(int(delete))\n driveMan.delete_drive(drive)\n except ValueError:\n abort(500, description=\"Expected int, but got {}.\".format(delete))\n except LookupError:\n abort(500, description=\"Invalid drive id {}\".format(delete))\n return redirect(url_for('drives'))\n return render_template('drives.html', drives=Drive.query.all())\n\n\[email protected]('/pools', methods=['GET'])\ndef pools():\n return render_template('pools.html', pools=Pool.query.all())\n\n\[email protected]('/pool/<pool>', methods=['GET'])\ndef pool(pool):\n db_pool = Pool.query.get(pool)\n if db_pool is None:\n abort(404, description=\"Pool with ID {} could not be found.\".format(pool))\n return render_template('pool.html', pool=db_pool)\n\n\[email protected]('/pools/add', methods=['GET', 'POST'])\ndef add_pool():\n form = PoolAddForm(request.form)\n form.drives.choices = viewManager.get_empty_drives()\n if request.method == 'POST' and form.validate():\n try:\n 
viewManager.create_btrfs_pool(form)\n except subprocess.CalledProcessError as e:\n abort(500, description=\"While creating a pool, the following exception occured: {}\".format(e))\n except subprocess.TimeoutExpired as e:\n abort(500, description=\"Pool creation took too long: {}\".format(e))\n scheduler.get_job(\"refresh_disks\").modify(next_run_time=datetime.datetime.now())\n sleep(1)\n return redirect(url_for('pools'))\n return render_template('pool_add.html', form=form)\n\n\[email protected]('/about', methods=['GET'])\ndef about():\n return render_template('about.html', version=app_version)\n\n\[email protected]('/partitions/<part>', methods=['GET'])\ndef partitions(part):\n try:\n drive = driveMan.get_drive_by_id(int(part))\n except ValueError:\n abort(500, description=\"Expected int, but got {}.\".format(part))\n except LookupError:\n abort(500, description=\"Invalid drive id {}\".format(part))\n return render_template('partitions.html', parts=drive.partitions)\n\n\[email protected]('/cleaning', methods=['GET'])\ndef cleaning():\n remove = request.args.get('remove')\n changestate = request.args.get('changestate')\n service = request.args.get('service')\n if not (remove is not None and changestate is not None):\n if remove is not None:\n try:\n remove = int(remove)\n Cleaning.query.filter(Cleaning.id == remove).delete()\n db.session.commit()\n return redirect(request.path, code=302)\n except ValueError:\n flash(\"Value Error: remove\")\n elif changestate is not None:\n try:\n changestate = int(changestate)\n job = Cleaning.query.get(changestate)\n if job.state == 0:\n job.state = 1\n else:\n job.state = 0\n db.session.commit()\n return redirect(request.path, code=302)\n except ValueError:\n flash(\"Value Error: changestate\")\n else:\n flash(\"Value Error: remove and changestate set\")\n if service is not None:\n try:\n service = str(service)\n if service == \"start\":\n if not cleaningMan.running():\n cleaningMan.enable()\n return redirect(request.path, 
code=302)\n else:\n flash(\"Error: Cleaning Service already running.\")\n elif service == \"pause\":\n if cleaningMan.running():\n cleaningMan.disable()\n return redirect(request.path, code=302)\n else:\n flash(\"Error: Cleaning Service already paused.\")\n else:\n raise ValueError\n except ValueError:\n flash(\"Value Error: service\")\n elements = Cleaning.query.order_by(db.asc(db.collate(Cleaning.name, 'NOCASE'))).all()\n return render_template('cleaning.html', elements=elements, task_running=cleaningMan.running())\n\n\[email protected]('/add_cleaning', methods=['GET', 'POST'])\ndef add_cleaning():\n form = CleaningForm(request.form)\n if request.method == 'POST' and form.validate():\n viewManager.create_cleaning_obj(form.jobname.data, form.path.data, form.active.data)\n return redirect(url_for('cleaning'))\n return render_template('add_cleaning.html', form=form)\n",
"step-ids": [
5,
8,
9,
11,
13
]
}
|
[
5,
8,
9,
11,
13
] |
#!/usr/bin/env python
import sys
import subprocess
import mystem
def run(args, fin=sys.stdin, fout=sys.stdout, ferr=sys.stderr, input_data=None):
'''\
Generic wrapper for MyStem
'''
mystem_path = mystem.util.find_mystem()
# make utf-8 a default encoding
if '-e' not in args:
args.extend(["-e", "utf-8"])
p = subprocess.Popen([mystem_path] + args,
stdout=fout,
stderr=ferr,
stdin=fin)
out_data, err_data = p.communicate(input=input_data)
return p.returncode, out_data, err_data
def main(args):
return run(args)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:])[0])
|
normal
|
{
"blob_id": "d4a4ea67a06107ad7ea18bb21fb1ec9e74ccd7c1",
"index": 7187,
"step-1": "<mask token>\n\n\ndef main(args):\n return run(args)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(args, fin=sys.stdin, fout=sys.stdout, ferr=sys.stderr, input_data=None\n ):\n \"\"\" Generic wrapper for MyStem\n \"\"\"\n mystem_path = mystem.util.find_mystem()\n if '-e' not in args:\n args.extend(['-e', 'utf-8'])\n p = subprocess.Popen([mystem_path] + args, stdout=fout, stderr=ferr,\n stdin=fin)\n out_data, err_data = p.communicate(input=input_data)\n return p.returncode, out_data, err_data\n\n\ndef main(args):\n return run(args)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run(args, fin=sys.stdin, fout=sys.stdout, ferr=sys.stderr, input_data=None\n ):\n \"\"\" Generic wrapper for MyStem\n \"\"\"\n mystem_path = mystem.util.find_mystem()\n if '-e' not in args:\n args.extend(['-e', 'utf-8'])\n p = subprocess.Popen([mystem_path] + args, stdout=fout, stderr=ferr,\n stdin=fin)\n out_data, err_data = p.communicate(input=input_data)\n return p.returncode, out_data, err_data\n\n\ndef main(args):\n return run(args)\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:])[0])\n",
"step-4": "import sys\nimport subprocess\nimport mystem\n\n\ndef run(args, fin=sys.stdin, fout=sys.stdout, ferr=sys.stderr, input_data=None\n ):\n \"\"\" Generic wrapper for MyStem\n \"\"\"\n mystem_path = mystem.util.find_mystem()\n if '-e' not in args:\n args.extend(['-e', 'utf-8'])\n p = subprocess.Popen([mystem_path] + args, stdout=fout, stderr=ferr,\n stdin=fin)\n out_data, err_data = p.communicate(input=input_data)\n return p.returncode, out_data, err_data\n\n\ndef main(args):\n return run(args)\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:])[0])\n",
"step-5": "#!/usr/bin/env python\nimport sys\nimport subprocess\n\nimport mystem\n\ndef run(args, fin=sys.stdin, fout=sys.stdout, ferr=sys.stderr, input_data=None):\n '''\\\n Generic wrapper for MyStem\n '''\n mystem_path = mystem.util.find_mystem()\n\n # make utf-8 a default encoding\n if '-e' not in args:\n args.extend([\"-e\", \"utf-8\"])\n\n p = subprocess.Popen([mystem_path] + args,\n stdout=fout,\n stderr=ferr,\n stdin=fin)\n\n out_data, err_data = p.communicate(input=input_data)\n\n return p.returncode, out_data, err_data\n\n\ndef main(args):\n return run(args)\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:])[0])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from collections import deque
def my_queue(n=5):
return deque([], n)
pass
if __name__ == '__main__':
mq = my_queue()
for i in range(10):
mq.append(i)
print((i, list(mq)))
"""Queue size does not go beyond n int, this outputs:
(0, [0])
(1, [0, 1])
(2, [0, 1, 2])
(3, [0, 1, 2, 3])
(4, [0, 1, 2, 3, 4])
(5, [1, 2, 3, 4, 5])
(6, [2, 3, 4, 5, 6])
(7, [3, 4, 5, 6, 7])
(8, [4, 5, 6, 7, 8])
(9, [5, 6, 7, 8, 9])
"""
|
normal
|
{
"blob_id": "499baaa8c739c1bd846edc944e510542d76bbed5",
"index": 9312,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef my_queue(n=5):\n return deque([], n)\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef my_queue(n=5):\n return deque([], n)\n pass\n\n\nif __name__ == '__main__':\n mq = my_queue()\n for i in range(10):\n mq.append(i)\n print((i, list(mq)))\n \"\"\"Queue size does not go beyond n int, this outputs:\n (0, [0])\n (1, [0, 1])\n (2, [0, 1, 2])\n (3, [0, 1, 2, 3])\n (4, [0, 1, 2, 3, 4])\n (5, [1, 2, 3, 4, 5])\n (6, [2, 3, 4, 5, 6])\n (7, [3, 4, 5, 6, 7])\n (8, [4, 5, 6, 7, 8])\n (9, [5, 6, 7, 8, 9])\n \"\"\"\n",
"step-4": "from collections import deque\n\n\ndef my_queue(n=5):\n return deque([], n)\n pass\n\n\nif __name__ == '__main__':\n mq = my_queue()\n for i in range(10):\n mq.append(i)\n print((i, list(mq)))\n \"\"\"Queue size does not go beyond n int, this outputs:\n (0, [0])\n (1, [0, 1])\n (2, [0, 1, 2])\n (3, [0, 1, 2, 3])\n (4, [0, 1, 2, 3, 4])\n (5, [1, 2, 3, 4, 5])\n (6, [2, 3, 4, 5, 6])\n (7, [3, 4, 5, 6, 7])\n (8, [4, 5, 6, 7, 8])\n (9, [5, 6, 7, 8, 9])\n \"\"\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import matplotlib.pyplot as plt
import math
import seaborn as sns
import numpy as np
suv_data=pd.read_csv("F:/Development/Machine Learning/suv-data/suv_data.csv")
print(suv_data.head(10))
print("the no of passengers in the list is"+str(len(suv_data.index)))
sns.countplot(x="Purchased",data=suv_data)
sns.countplot(x="Purchased",hue="Gender",data=suv_data)
suv_data['Age'].plot.hist()
suv_data.info()
suv_data['EstimatedSalary'].plot.hist(bins=50,figsize=(10,5))
print(suv_data.isnull())
print(suv_data.isnull().sum())
sns.heatmap(suv_data.isnull(),yticklabels=False,cmap="viridis")
plt.show()
sns.boxplot(x="Gender",y="Age",data=suv_data)
plt.show()
suv_data.drop("User ID",axis=1,inplace=True)
suv_data.columns
suv_data.head(10)
Gen=pd.get_dummies(suv_data['Gender'],drop_first=True)
print(Gen.head(5))
suv_data=pd.concat([suv_data,Gen],axis=1)
print(suv_data.head(5))
suv_data.drop("Gender",axis=1,inplace=True)
print(suv_data.head(10))
X=suv_data.iloc[:,[0,1,3]].values
y=suv_data.iloc[:,2].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
from sklearn.linear_model import LogisticRegression
logmodel=LogisticRegression()
logmodel.fit(X_train, y_train)
predictions=logmodel.predict(X_test)
print(predictions)
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test,predictions))
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,predictions)*100)
|
normal
|
{
"blob_id": "c955057d7f8d5289898ecb96a290f5a7d241b787",
"index": 6440,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(suv_data.head(10))\nprint('the no of passengers in the list is' + str(len(suv_data.index)))\nsns.countplot(x='Purchased', data=suv_data)\nsns.countplot(x='Purchased', hue='Gender', data=suv_data)\nsuv_data['Age'].plot.hist()\nsuv_data.info()\nsuv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))\nprint(suv_data.isnull())\nprint(suv_data.isnull().sum())\nsns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')\nplt.show()\nsns.boxplot(x='Gender', y='Age', data=suv_data)\nplt.show()\nsuv_data.drop('User ID', axis=1, inplace=True)\nsuv_data.columns\nsuv_data.head(10)\n<mask token>\nprint(Gen.head(5))\n<mask token>\nprint(suv_data.head(5))\nsuv_data.drop('Gender', axis=1, inplace=True)\nprint(suv_data.head(10))\n<mask token>\nlogmodel.fit(X_train, y_train)\n<mask token>\nprint(predictions)\n<mask token>\nprint(classification_report(y_test, predictions))\n<mask token>\nprint(confusion_matrix(y_test, predictions))\n<mask token>\nprint(accuracy_score(y_test, predictions) * 100)\n",
"step-3": "<mask token>\nsuv_data = pd.read_csv('F:/Development/Machine Learning/suv-data/suv_data.csv')\nprint(suv_data.head(10))\nprint('the no of passengers in the list is' + str(len(suv_data.index)))\nsns.countplot(x='Purchased', data=suv_data)\nsns.countplot(x='Purchased', hue='Gender', data=suv_data)\nsuv_data['Age'].plot.hist()\nsuv_data.info()\nsuv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))\nprint(suv_data.isnull())\nprint(suv_data.isnull().sum())\nsns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')\nplt.show()\nsns.boxplot(x='Gender', y='Age', data=suv_data)\nplt.show()\nsuv_data.drop('User ID', axis=1, inplace=True)\nsuv_data.columns\nsuv_data.head(10)\nGen = pd.get_dummies(suv_data['Gender'], drop_first=True)\nprint(Gen.head(5))\nsuv_data = pd.concat([suv_data, Gen], axis=1)\nprint(suv_data.head(5))\nsuv_data.drop('Gender', axis=1, inplace=True)\nprint(suv_data.head(10))\nX = suv_data.iloc[:, [0, 1, 3]].values\ny = suv_data.iloc[:, 2].values\n<mask token>\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,\n random_state=0)\n<mask token>\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n<mask token>\nlogmodel = LogisticRegression()\nlogmodel.fit(X_train, y_train)\npredictions = logmodel.predict(X_test)\nprint(predictions)\n<mask token>\nprint(classification_report(y_test, predictions))\n<mask token>\nprint(confusion_matrix(y_test, predictions))\n<mask token>\nprint(accuracy_score(y_test, predictions) * 100)\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport seaborn as sns\nimport numpy as np\nsuv_data = pd.read_csv('F:/Development/Machine Learning/suv-data/suv_data.csv')\nprint(suv_data.head(10))\nprint('the no of passengers in the list is' + str(len(suv_data.index)))\nsns.countplot(x='Purchased', data=suv_data)\nsns.countplot(x='Purchased', hue='Gender', data=suv_data)\nsuv_data['Age'].plot.hist()\nsuv_data.info()\nsuv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))\nprint(suv_data.isnull())\nprint(suv_data.isnull().sum())\nsns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')\nplt.show()\nsns.boxplot(x='Gender', y='Age', data=suv_data)\nplt.show()\nsuv_data.drop('User ID', axis=1, inplace=True)\nsuv_data.columns\nsuv_data.head(10)\nGen = pd.get_dummies(suv_data['Gender'], drop_first=True)\nprint(Gen.head(5))\nsuv_data = pd.concat([suv_data, Gen], axis=1)\nprint(suv_data.head(5))\nsuv_data.drop('Gender', axis=1, inplace=True)\nprint(suv_data.head(10))\nX = suv_data.iloc[:, [0, 1, 3]].values\ny = suv_data.iloc[:, 2].values\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,\n random_state=0)\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\nfrom sklearn.linear_model import LogisticRegression\nlogmodel = LogisticRegression()\nlogmodel.fit(X_train, y_train)\npredictions = logmodel.predict(X_test)\nprint(predictions)\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, predictions))\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(y_test, predictions))\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_test, predictions) * 100)\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt \nimport math\nimport seaborn as sns\nimport numpy as np\nsuv_data=pd.read_csv(\"F:/Development/Machine Learning/suv-data/suv_data.csv\")\nprint(suv_data.head(10))\nprint(\"the no of passengers in the list is\"+str(len(suv_data.index)))\nsns.countplot(x=\"Purchased\",data=suv_data)\nsns.countplot(x=\"Purchased\",hue=\"Gender\",data=suv_data)\nsuv_data['Age'].plot.hist()\nsuv_data.info()\nsuv_data['EstimatedSalary'].plot.hist(bins=50,figsize=(10,5))\nprint(suv_data.isnull())\nprint(suv_data.isnull().sum())\nsns.heatmap(suv_data.isnull(),yticklabels=False,cmap=\"viridis\")\nplt.show()\nsns.boxplot(x=\"Gender\",y=\"Age\",data=suv_data)\nplt.show()\nsuv_data.drop(\"User ID\",axis=1,inplace=True)\nsuv_data.columns\nsuv_data.head(10)\nGen=pd.get_dummies(suv_data['Gender'],drop_first=True)\nprint(Gen.head(5))\nsuv_data=pd.concat([suv_data,Gen],axis=1)\nprint(suv_data.head(5))\nsuv_data.drop(\"Gender\",axis=1,inplace=True)\nprint(suv_data.head(10))\nX=suv_data.iloc[:,[0,1,3]].values\ny=suv_data.iloc[:,2].values\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\nfrom sklearn.preprocessing import StandardScaler\nsc=StandardScaler()\nX_train=sc.fit_transform(X_train)\nX_test=sc.transform(X_test)\nfrom sklearn.linear_model import LogisticRegression\nlogmodel=LogisticRegression()\nlogmodel.fit(X_train, y_train)\npredictions=logmodel.predict(X_test)\nprint(predictions)\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test,predictions))\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(y_test,predictions))\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_test,predictions)*100)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Graph:
    """Directed graph stored as an adjacency list keyed by vertex."""

    def __init__(self, num_vertices):
        # Pre-create an empty neighbour list for vertices 0..num_vertices-1.
        self.adj_list = {vertex: [] for vertex in range(num_vertices)}

    def add_vertice(self, source):
        """Register (or reset) a vertex with no outgoing edges."""
        self.adj_list[source] = []

    def add_edge(self, source, dest):
        """Add a directed edge source -> dest."""
        self.adj_list[source].append(dest)

    def print_graph(self):
        """Dump the adjacency list and its vertex set to stdout."""
        print(self.adj_list)
        print(self.adj_list.keys())

    def topo_order(self):
        """Print one topological ordering of the vertices (post-order DFS)."""
        seen = set()
        finished = []

        def visit(node):
            # A vertex is emitted only after all of its successors.
            seen.add(node)
            for nxt in self.adj_list[node]:
                if nxt not in seen:
                    visit(nxt)
            finished.append(node)

        for vertex in self.adj_list:
            if vertex not in seen:
                visit(vertex)
        finished.reverse()
        print(finished)
# Demo: build a 6-vertex DAG (edges point from prerequisite to dependent)
# and show its adjacency list followed by one topological ordering.
g = Graph(6)
g.add_edge(5, 0)
g.add_edge(5, 2)
g.add_edge(2, 3)
g.add_edge(3, 1)
g.add_edge(4, 1)
g.add_edge(4, 0)
g.print_graph()
g.topo_order()
|
normal
|
{
"blob_id": "ae5ec7919b9de4fbf578547c31837add32826f60",
"index": 7448,
"step-1": "class Graph:\n\n def __init__(self, num_vertices):\n self.adj_list = {}\n for i in range(num_vertices):\n self.adj_list[i] = []\n\n def add_vertice(self, source):\n self.adj_list[source] = []\n\n def add_edge(self, source, dest):\n self.adj_list[source].append(dest)\n <mask token>\n\n def topo_order(self):\n\n def topo_util(source, visited, stack):\n visited.add(source)\n for neighbour in self.adj_list[source]:\n if neighbour not in visited:\n topo_util(neighbour, visited, stack)\n stack.append(source)\n visited, stack = set(), []\n for vertex in self.adj_list.keys():\n if vertex not in visited:\n topo_util(vertex, visited, stack)\n stack.reverse()\n print(stack)\n\n\n<mask token>\n",
"step-2": "class Graph:\n\n def __init__(self, num_vertices):\n self.adj_list = {}\n for i in range(num_vertices):\n self.adj_list[i] = []\n\n def add_vertice(self, source):\n self.adj_list[source] = []\n\n def add_edge(self, source, dest):\n self.adj_list[source].append(dest)\n\n def print_graph(self):\n print(self.adj_list)\n print(self.adj_list.keys())\n\n def topo_order(self):\n\n def topo_util(source, visited, stack):\n visited.add(source)\n for neighbour in self.adj_list[source]:\n if neighbour not in visited:\n topo_util(neighbour, visited, stack)\n stack.append(source)\n visited, stack = set(), []\n for vertex in self.adj_list.keys():\n if vertex not in visited:\n topo_util(vertex, visited, stack)\n stack.reverse()\n print(stack)\n\n\n<mask token>\n",
"step-3": "class Graph:\n\n def __init__(self, num_vertices):\n self.adj_list = {}\n for i in range(num_vertices):\n self.adj_list[i] = []\n\n def add_vertice(self, source):\n self.adj_list[source] = []\n\n def add_edge(self, source, dest):\n self.adj_list[source].append(dest)\n\n def print_graph(self):\n print(self.adj_list)\n print(self.adj_list.keys())\n\n def topo_order(self):\n\n def topo_util(source, visited, stack):\n visited.add(source)\n for neighbour in self.adj_list[source]:\n if neighbour not in visited:\n topo_util(neighbour, visited, stack)\n stack.append(source)\n visited, stack = set(), []\n for vertex in self.adj_list.keys():\n if vertex not in visited:\n topo_util(vertex, visited, stack)\n stack.reverse()\n print(stack)\n\n\n<mask token>\ng.add_edge(5, 0)\ng.add_edge(5, 2)\ng.add_edge(2, 3)\ng.add_edge(3, 1)\ng.add_edge(4, 1)\ng.add_edge(4, 0)\ng.print_graph()\ng.topo_order()\n",
"step-4": "class Graph:\n\n def __init__(self, num_vertices):\n self.adj_list = {}\n for i in range(num_vertices):\n self.adj_list[i] = []\n\n def add_vertice(self, source):\n self.adj_list[source] = []\n\n def add_edge(self, source, dest):\n self.adj_list[source].append(dest)\n\n def print_graph(self):\n print(self.adj_list)\n print(self.adj_list.keys())\n\n def topo_order(self):\n\n def topo_util(source, visited, stack):\n visited.add(source)\n for neighbour in self.adj_list[source]:\n if neighbour not in visited:\n topo_util(neighbour, visited, stack)\n stack.append(source)\n visited, stack = set(), []\n for vertex in self.adj_list.keys():\n if vertex not in visited:\n topo_util(vertex, visited, stack)\n stack.reverse()\n print(stack)\n\n\ng = Graph(6)\ng.add_edge(5, 0)\ng.add_edge(5, 2)\ng.add_edge(2, 3)\ng.add_edge(3, 1)\ng.add_edge(4, 1)\ng.add_edge(4, 0)\ng.print_graph()\ng.topo_order()\n",
"step-5": null,
"step-ids": [
5,
6,
7,
8
]
}
|
[
5,
6,
7,
8
] |
# Read two whitespace-separated tokens and print the lexicographically
# larger one; when they are equal, either token may be printed.
A, B = input().split()
if A > B:
    print(A)
elif B > A:
    print(B)
else:
    # A == B here. The original `print(AorB)` referenced an undefined name
    # and raised NameError on equal inputs; printing A is the intended output.
    print(A)
|
normal
|
{
"blob_id": "8cbe78863de535a5b83eacebe67402569b4015fa",
"index": 9189,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif A > B:\n print(A)\nelif B > A:\n print(B)\nelse:\n print(AorB)\n",
"step-3": "A, B = map(str, input().split())\nif A > B:\n print(A)\nelif B > A:\n print(B)\nelse:\n print(AorB)\n",
"step-4": "A,B=map(str,input().split())\nif(A>B):\n\tprint(A)\nelif(B>A):\n\tprint(B)\nelse:\n\tprint(AorB)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
from requests.auth import HTTPBasicAuth
def __run_query(self, query):
    """POST a GraphQL payload to the GitHub v4 API and return the parsed JSON.

    `query` is the complete request body (a dict with "query" and
    "variables" keys). Raises Exception on any non-200 response.
    """
    # NOTE(review): username/password are hard-coded in source — a security
    # risk; they should come from the environment or a secrets store.
    URL = 'https://api.github.com/graphql'
    request = requests.post(URL, json=query,auth=HTTPBasicAuth('gleisonbt', 'Aleister93'))
    # GitHub returns 200 even for GraphQL-level errors; only transport-level
    # failures are surfaced as an exception here.
    if request.status_code == 200:
        return request.json()
    else:
        raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
def user_get_starred(self, username):
    """Fetch the repositories starred by `username` and by everyone
    that user follows (first 100 of each), with star counts."""
    query = """
    query userGetStarred($username: String!){
        user(login: $username){
            starredRepositories(first:100){
                nodes{
                    nameWithOwner
                    description
                    stargazers{
                        totalCount
                    }
                }
            }
            following(first:100){
                nodes{
                    starredRepositories(first:100){
                        nodes{
                            nameWithOwner
                            description
                            stargazers{
                                totalCount
                            }
                        }
                    }
                }
            }
        }
    }
    """
    payload = {"query": query, "variables": {"username": username}}
    return __run_query(self, payload)
def repos_for_query(self, query):
    """Search GitHub repositories matching `query` (first 100 results),
    returning name, description and star count for each."""
    search_query = """
    query queryByItems($queryString: String!){
        search(query:$queryString, type:REPOSITORY, first: 100){
            nodes{
                ... on Repository{
                    nameWithOwner
                    description
                    stargazers{
                        totalCount
                    }
                }
            }
        }
    }
    """
    payload = {"query": search_query, "variables": {"queryString": query}}
    return __run_query(self, payload)
|
normal
|
{
"blob_id": "fa511411e59880fd80fba0ccc49c95d42cb4b78d",
"index": 6962,
"step-1": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\n<mask token>\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-3": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query, 'variables': {'username': username}}\n return __run_query(self, json)\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-4": "import requests\nfrom requests.auth import HTTPBasicAuth\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query, 'variables': {'username': username}}\n return __run_query(self, json)\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-5": "import requests\nfrom requests.auth import HTTPBasicAuth\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n\n request = requests.post(URL, json=query,auth=HTTPBasicAuth('gleisonbt', 'Aleister93'))\n\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception(\"Query failed to run by returning code of {}. {}\".format(request.status_code, query))\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n\n json = {\n \"query\": query, \"variables\":{\n \"username\": username\n }\n }\n\n return __run_query(self, json)\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n\n json = {\n \"query\": query2, \"variables\":{\n \"queryString\": query\n }\n }\n\n return __run_query(self, json)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from .score_funcs import *
from cryptonita.fuzzy_set import FuzzySet
from cryptonita.helpers import are_bytes_or_fail
def scoring(msg, space, score_func, min_score=0.5, **score_func_params):
    ''' Run the score function over the given message and over a parametric
        value x. Return all the values x as a FuzzySet (guess)
        which scores is greather than the minimum score.

        The parametric space <space> can is defined as:
         - a range object
         - or any other iterable of the parametric values x

        For each possible x, score each using <score_func> and
        drop anyone with a score of <min_score> or less.

        Extra parameters can be passed to the <score_func> using
        <score_func_params>.

        Return a FuzzySet with the x values.
        '''
    assert 0.0 <= min_score <= 1.0
    are_bytes_or_fail(msg, 'msg')

    # Lazily score every candidate; FuzzySet drops those below min_score.
    scored_candidates = (
        (x, score_func(msg, x, **score_func_params)) for x in space
    )
    return FuzzySet(scored_candidates, pr='tuple', min_membership=min_score)
|
normal
|
{
"blob_id": "99048ddb3f42382c8b8b435d832a45011a031cf1",
"index": 8537,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n \"\"\" Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n \"\"\"\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n params = score_func_params\n lengths = FuzzySet(((x, score_func(msg, x, **params)) for x in space),\n pr='tuple', min_membership=min_score)\n return lengths\n",
"step-3": "from .score_funcs import *\nfrom cryptonita.fuzzy_set import FuzzySet\nfrom cryptonita.helpers import are_bytes_or_fail\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n \"\"\" Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n \"\"\"\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n params = score_func_params\n lengths = FuzzySet(((x, score_func(msg, x, **params)) for x in space),\n pr='tuple', min_membership=min_score)\n return lengths\n",
"step-4": "from .score_funcs import *\n\nfrom cryptonita.fuzzy_set import FuzzySet\nfrom cryptonita.helpers import are_bytes_or_fail\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n ''' Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n '''\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n\n params = score_func_params\n lengths = FuzzySet(\n ((x, score_func(msg, x, **params)) for x in space),\n pr='tuple',\n min_membership=min_score\n )\n return lengths\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import os
import numpy as np
from bs4 import BeautifulSoup
from nltk import word_tokenize
from collections import Counter
import random
from utils import save_pickle
# Corpus-preparation script: walks data/main/<topic>/<article> HTML files,
# tokenizes paragraph text, writes shuffled train/valid/test splits, builds
# a frequency-capped vocabulary, and pickles padded id sequences.
root = 'data'
ratios = [('train', 0.85), ('valid', 0.05), ('test', 0.1)]
max_len = 64        # maximum tokens kept per sample
vocab_size = 16000  # vocabulary cap (including special symbols)
data = []
path = os.path.join(root,'main')
topics = os.listdir(path)
i = 0
for topic in topics:
    i += 1
    arts = os.listdir(os.path.join(path,topic))
    j = 0
    for art in arts:
        j += 1
        with open(os.path.join(path,topic,art),encoding='UTF-8') as f:
            #lines = unicode(f.read(), errors='ignore')
            lines = f.read()
            #print(type(lines))
            #print(i,j)
            soup = BeautifulSoup(lines, 'html.parser')
            for text in soup.find_all('p'):
                # replace punctuation characters with spaces
                text = text.get_text()
                filters = '!"\'#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
                translate_dict = dict((c, " ") for c in filters)
                translate_map = str.maketrans(translate_dict)
                text = text.translate(translate_map)
                tokens = word_tokenize(text)
                # Each sample line is "<topic-idx> <article-idx> <tokens...>".
                lines = [str(i)] + [str(j)] + tokens
                if(len(tokens)==0):
                    break
                else:
                    data.append(' '.join(lines))
if True:
    random.shuffle(data)
    num_samples = len(data)
    # Carve the shuffled samples into the three split files by ratio.
    for split, ratio in ratios:
        with open(os.path.join(root, "%s.txt"%split), 'w') as f:
            length = int(num_samples * ratio)
            f.write('\n'.join(data[:length]))
        data = data[length:]
    print("Building vocabulary from DUC data")
    counter = Counter()
    with open(os.path.join(root, 'train.txt')) as f:
        for line in f:
            words = line.strip().lower().split()[:max_len]
            counter.update(words)
    word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}
    # Keep only words seen more than 5 times, capped at vocab_size.
    vocab = [word for word, freq in counter.most_common() if freq > 5]
    for word in vocab[:vocab_size - 2]:
        word_to_idx[word] = len(word_to_idx)
    # exclude <bos> and <pad> symbols
    print("Vocabulary size: %d" % (len(word_to_idx) - 2))
    save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))
    splits = ['train', 'valid', 'test']
    num_sents, num_words = 0, 0
    # Map a token sequence to vocabulary ids, falling back to <unk>.
    func = lambda seq: np.array([
        word_to_idx.get(symbol, word_to_idx['<unk>']) for symbol in seq])
    for split in splits:
        print("Creating %s DUC data" % split)
        data = []
        with open(os.path.join(root, "%s.txt" % split)) as f:
            for line in f:
                # First two tokens are the topic/article indices added above.
                words = line.strip().lower().split()[:max_len + 2]
                topic, art, words = int(words[0]), int(words[1]), words[2:] ###
                length = len(words)
                paddings = ['<pad>'] * (max_len - length)
                enc_input = func(words + paddings)
                dec_input = func(['<bos>'] + words + paddings)
                target = func(words + ['<eos>'] + paddings)
                data.append((enc_input, dec_input, target, length, topic)) ###
                num_words += length
        print("%s samples: %d" %(split.capitalize(), len(data)))
        save_pickle(data, os.path.join(root, "%s.pkl" % split))
        num_sents += len(data)
    print("Average length: %.2f" %(num_words / num_sents))
|
normal
|
{
"blob_id": "977841e0bb73cec879fbb1868f1e64102c6d8c1a",
"index": 2119,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor topic in topics:\n i += 1\n arts = os.listdir(os.path.join(path, topic))\n j = 0\n for art in arts:\n j += 1\n with open(os.path.join(path, topic, art), encoding='UTF-8') as f:\n lines = f.read()\n soup = BeautifulSoup(lines, 'html.parser')\n for text in soup.find_all('p'):\n text = text.get_text()\n filters = '!\"\\'#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n'\n translate_dict = dict((c, ' ') for c in filters)\n translate_map = str.maketrans(translate_dict)\n text = text.translate(translate_map)\n tokens = word_tokenize(text)\n lines = [str(i)] + [str(j)] + tokens\n if len(tokens) == 0:\n break\n else:\n data.append(' '.join(lines))\nif True:\n random.shuffle(data)\n num_samples = len(data)\n for split, ratio in ratios:\n with open(os.path.join(root, '%s.txt' % split), 'w') as f:\n length = int(num_samples * ratio)\n f.write('\\n'.join(data[:length]))\n data = data[length:]\n print('Building vocabulary from DUC data')\n counter = Counter()\n with open(os.path.join(root, 'train.txt')) as f:\n for line in f:\n words = line.strip().lower().split()[:max_len]\n counter.update(words)\n word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}\n vocab = [word for word, freq in counter.most_common() if freq > 5]\n for word in vocab[:vocab_size - 2]:\n word_to_idx[word] = len(word_to_idx)\n print('Vocabulary size: %d' % (len(word_to_idx) - 2))\n save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))\n splits = ['train', 'valid', 'test']\n num_sents, num_words = 0, 0\n func = lambda seq: np.array([word_to_idx.get(symbol, word_to_idx[\n '<unk>']) for symbol in seq])\n for split in splits:\n print('Creating %s DUC data' % split)\n data = []\n with open(os.path.join(root, '%s.txt' % split)) as f:\n for line in f:\n words = line.strip().lower().split()[:max_len + 2]\n topic, art, words = int(words[0]), int(words[1]), words[2:]\n length = len(words)\n paddings = ['<pad>'] * (max_len - length)\n enc_input = func(words + paddings)\n dec_input = 
func(['<bos>'] + words + paddings)\n target = func(words + ['<eos>'] + paddings)\n data.append((enc_input, dec_input, target, length, topic))\n num_words += length\n print('%s samples: %d' % (split.capitalize(), len(data)))\n save_pickle(data, os.path.join(root, '%s.pkl' % split))\n num_sents += len(data)\n print('Average length: %.2f' % (num_words / num_sents))\n",
"step-3": "<mask token>\nroot = 'data'\nratios = [('train', 0.85), ('valid', 0.05), ('test', 0.1)]\nmax_len = 64\nvocab_size = 16000\ndata = []\npath = os.path.join(root, 'main')\ntopics = os.listdir(path)\ni = 0\nfor topic in topics:\n i += 1\n arts = os.listdir(os.path.join(path, topic))\n j = 0\n for art in arts:\n j += 1\n with open(os.path.join(path, topic, art), encoding='UTF-8') as f:\n lines = f.read()\n soup = BeautifulSoup(lines, 'html.parser')\n for text in soup.find_all('p'):\n text = text.get_text()\n filters = '!\"\\'#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n'\n translate_dict = dict((c, ' ') for c in filters)\n translate_map = str.maketrans(translate_dict)\n text = text.translate(translate_map)\n tokens = word_tokenize(text)\n lines = [str(i)] + [str(j)] + tokens\n if len(tokens) == 0:\n break\n else:\n data.append(' '.join(lines))\nif True:\n random.shuffle(data)\n num_samples = len(data)\n for split, ratio in ratios:\n with open(os.path.join(root, '%s.txt' % split), 'w') as f:\n length = int(num_samples * ratio)\n f.write('\\n'.join(data[:length]))\n data = data[length:]\n print('Building vocabulary from DUC data')\n counter = Counter()\n with open(os.path.join(root, 'train.txt')) as f:\n for line in f:\n words = line.strip().lower().split()[:max_len]\n counter.update(words)\n word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}\n vocab = [word for word, freq in counter.most_common() if freq > 5]\n for word in vocab[:vocab_size - 2]:\n word_to_idx[word] = len(word_to_idx)\n print('Vocabulary size: %d' % (len(word_to_idx) - 2))\n save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))\n splits = ['train', 'valid', 'test']\n num_sents, num_words = 0, 0\n func = lambda seq: np.array([word_to_idx.get(symbol, word_to_idx[\n '<unk>']) for symbol in seq])\n for split in splits:\n print('Creating %s DUC data' % split)\n data = []\n with open(os.path.join(root, '%s.txt' % split)) as f:\n for line in f:\n words = line.strip().lower().split()[:max_len 
+ 2]\n topic, art, words = int(words[0]), int(words[1]), words[2:]\n length = len(words)\n paddings = ['<pad>'] * (max_len - length)\n enc_input = func(words + paddings)\n dec_input = func(['<bos>'] + words + paddings)\n target = func(words + ['<eos>'] + paddings)\n data.append((enc_input, dec_input, target, length, topic))\n num_words += length\n print('%s samples: %d' % (split.capitalize(), len(data)))\n save_pickle(data, os.path.join(root, '%s.pkl' % split))\n num_sents += len(data)\n print('Average length: %.2f' % (num_words / num_sents))\n",
"step-4": "import requests\nimport os\nimport numpy as np\nfrom bs4 import BeautifulSoup\nfrom nltk import word_tokenize\nfrom collections import Counter\nimport random\nfrom utils import save_pickle\nroot = 'data'\nratios = [('train', 0.85), ('valid', 0.05), ('test', 0.1)]\nmax_len = 64\nvocab_size = 16000\ndata = []\npath = os.path.join(root, 'main')\ntopics = os.listdir(path)\ni = 0\nfor topic in topics:\n i += 1\n arts = os.listdir(os.path.join(path, topic))\n j = 0\n for art in arts:\n j += 1\n with open(os.path.join(path, topic, art), encoding='UTF-8') as f:\n lines = f.read()\n soup = BeautifulSoup(lines, 'html.parser')\n for text in soup.find_all('p'):\n text = text.get_text()\n filters = '!\"\\'#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n'\n translate_dict = dict((c, ' ') for c in filters)\n translate_map = str.maketrans(translate_dict)\n text = text.translate(translate_map)\n tokens = word_tokenize(text)\n lines = [str(i)] + [str(j)] + tokens\n if len(tokens) == 0:\n break\n else:\n data.append(' '.join(lines))\nif True:\n random.shuffle(data)\n num_samples = len(data)\n for split, ratio in ratios:\n with open(os.path.join(root, '%s.txt' % split), 'w') as f:\n length = int(num_samples * ratio)\n f.write('\\n'.join(data[:length]))\n data = data[length:]\n print('Building vocabulary from DUC data')\n counter = Counter()\n with open(os.path.join(root, 'train.txt')) as f:\n for line in f:\n words = line.strip().lower().split()[:max_len]\n counter.update(words)\n word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}\n vocab = [word for word, freq in counter.most_common() if freq > 5]\n for word in vocab[:vocab_size - 2]:\n word_to_idx[word] = len(word_to_idx)\n print('Vocabulary size: %d' % (len(word_to_idx) - 2))\n save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))\n splits = ['train', 'valid', 'test']\n num_sents, num_words = 0, 0\n func = lambda seq: np.array([word_to_idx.get(symbol, word_to_idx[\n '<unk>']) for symbol in seq])\n for split in 
splits:\n print('Creating %s DUC data' % split)\n data = []\n with open(os.path.join(root, '%s.txt' % split)) as f:\n for line in f:\n words = line.strip().lower().split()[:max_len + 2]\n topic, art, words = int(words[0]), int(words[1]), words[2:]\n length = len(words)\n paddings = ['<pad>'] * (max_len - length)\n enc_input = func(words + paddings)\n dec_input = func(['<bos>'] + words + paddings)\n target = func(words + ['<eos>'] + paddings)\n data.append((enc_input, dec_input, target, length, topic))\n num_words += length\n print('%s samples: %d' % (split.capitalize(), len(data)))\n save_pickle(data, os.path.join(root, '%s.pkl' % split))\n num_sents += len(data)\n print('Average length: %.2f' % (num_words / num_sents))\n",
"step-5": "import requests\nimport os\nimport numpy as np\nfrom bs4 import BeautifulSoup\nfrom nltk import word_tokenize\nfrom collections import Counter\nimport random\n\nfrom utils import save_pickle\n\n\nroot = 'data'\nratios = [('train', 0.85), ('valid', 0.05), ('test', 0.1)]\nmax_len = 64\nvocab_size = 16000\n\n\ndata = []\npath = os.path.join(root,'main')\ntopics = os.listdir(path)\ni = 0\nfor topic in topics:\n i += 1\n arts = os.listdir(os.path.join(path,topic))\n j = 0\n for art in arts:\n j += 1\n with open(os.path.join(path,topic,art),encoding='UTF-8') as f:\n #lines = unicode(f.read(), errors='ignore')\n lines = f.read()\n #print(type(lines))\n #print(i,j)\n soup = BeautifulSoup(lines, 'html.parser')\n for text in soup.find_all('p'):\n # replace punctuation characters with spaces\n text = text.get_text()\n filters = '!\"\\'#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n'\n translate_dict = dict((c, \" \") for c in filters)\n translate_map = str.maketrans(translate_dict)\n text = text.translate(translate_map)\n tokens = word_tokenize(text)\n lines = [str(i)] + [str(j)] + tokens\n if(len(tokens)==0):\n break\n else:\n data.append(' '.join(lines))\n\nif True:\n random.shuffle(data)\n\n num_samples = len(data)\n for split, ratio in ratios:\n with open(os.path.join(root, \"%s.txt\"%split), 'w') as f:\n length = int(num_samples * ratio)\n f.write('\\n'.join(data[:length]))\n data = data[length:]\n\n print(\"Building vocabulary from DUC data\")\n counter = Counter()\n with open(os.path.join(root, 'train.txt')) as f:\n for line in f:\n words = line.strip().lower().split()[:max_len]\n counter.update(words)\n\n word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}\n vocab = [word for word, freq in counter.most_common() if freq > 5]\n for word in vocab[:vocab_size - 2]:\n word_to_idx[word] = len(word_to_idx)\n\n # exclude <bos> and <pad> symbols\n print(\"Vocabulary size: %d\" % (len(word_to_idx) - 2))\n save_pickle(word_to_idx, os.path.join(root, 
'vocab.pkl'))\n\n splits = ['train', 'valid', 'test']\n num_sents, num_words = 0, 0\n func = lambda seq: np.array([\n word_to_idx.get(symbol, word_to_idx['<unk>']) for symbol in seq])\n for split in splits:\n print(\"Creating %s DUC data\" % split)\n data = []\n with open(os.path.join(root, \"%s.txt\" % split)) as f:\n for line in f:\n words = line.strip().lower().split()[:max_len + 2]\n topic, art, words = int(words[0]), int(words[1]), words[2:] ###\n length = len(words)\n paddings = ['<pad>'] * (max_len - length)\n enc_input = func(words + paddings)\n dec_input = func(['<bos>'] + words + paddings)\n target = func(words + ['<eos>'] + paddings)\n data.append((enc_input, dec_input, target, length, topic)) ###\n num_words += length\n print(\"%s samples: %d\" %(split.capitalize(), len(data)))\n save_pickle(data, os.path.join(root, \"%s.pkl\" % split))\n num_sents += len(data)\n\n print(\"Average length: %.2f\" %(num_words / num_sents))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import nltk
import A
from collections import defaultdict
from nltk.align import Alignment, AlignedSent
class BerkeleyAligner():
    """Joint word aligner: trains translation (t) and distortion (q) tables
    in both directions (words->mots and mots->words) with EM, averaging the
    two directions' expected counts each iteration.

    Fixes over the original draft: the Python 2 ``print`` statements in
    ``train`` were syntax errors in this otherwise Python 3 file, and
    ``align`` crashed with TypeError on probability ties (tuple comparison
    reached ``None`` vs ``int``).
    """

    def __init__(self, align_sents, num_iter):
        self.t, self.q = self.train(align_sents, num_iter)

    def align(self, align_sent):
        """Compute alignments for align_sent using this model's parameters.

        Returns an AlignedSent with the sentence pair and, for every source
        word j, a pair (j, i) where i is the most probable target index or
        None when aligning to NULL wins.
        """
        alignments = []
        german = align_sent.words
        english = align_sent.mots
        len_g = len(german)
        len_e = len(english)
        for j in range(len_g):
            g = german[j]
            # Start with the probability of aligning to NULL.
            best_prob = self.t[(g, None)] * self.q[(0, j, len_e, len_g)]
            best_alignment_point = None
            for i in range(len_e):
                e = english[i]
                # Consider both directions; keep the first best on ties
                # (comparing probabilities only avoids the None-vs-int
                # TypeError the original tuple max() raised on ties).
                ge_prob = self.t[(e, g)] * self.q[(j, i, len_g, len_e)]
                eg_prob = self.t[(g, e)] * self.q[(i, j, len_e, len_g)]
                candidate = max(ge_prob, eg_prob)
                if candidate > best_prob:
                    best_prob = candidate
                    best_alignment_point = i
            alignments.append((j, best_alignment_point))
        return AlignedSent(align_sent.words, align_sent.mots, alignments)

    def train(self, aligned_sents, num_iters):
        """Run EM for num_iters iterations over the aligned corpus.

        Returns (t, q): translation table t[(tgt, src)] and distortion
        table q[(i, j, len_src, len_tgt)], shared between both directions.
        """
        # ---- Initialization ------------------------------------------------
        # Vocabulary of each language: german = .words, english = .mots
        g_vocab = set()
        e_vocab = set()
        for sentence in aligned_sents:
            g_vocab.update(sentence.words)
            e_vocab.update(sentence.mots)
        # Uniform translation probabilities for both directions.
        t = defaultdict(float)
        for g in g_vocab:
            for e in e_vocab:
                t[(g, e)] = 1.0 / float(len(g_vocab))
                t[(e, g)] = 1.0 / float(len(e_vocab))
        # Uniform distortion tables, one per direction.
        q_eg = defaultdict(float)
        q_ge = defaultdict(float)
        for sentence in aligned_sents:
            len_e = len(sentence.mots)
            len_g = len(sentence.words)
            for i in range(len_e):
                for j in range(len_g):
                    q_eg[(i, j, len_e, len_g)] = 1.0 / float(len_e + 1)
                    q_ge[(j, i, len_g, len_e)] = 1.0 / float(len_g + 1)
        print('Initialization complete')
        # NOTE(review): q_eg/q_ge are never refreshed from the M-step's q, so
        # every E step reuses the initial uniform distortions; also t and q
        # merge both directions into one dict, so keys can collide when a
        # word occurs in both vocabularies or len_e == len_g. Preserved as-is
        # to keep numeric behavior identical — confirm intent before changing.
        for it in range(num_iters):
            print('Iteration ' + str(it + 1) + ' /' + str(num_iters))
            # ---- E step ----------------------------------------------------
            count_g_given_e = defaultdict(float)
            count_any_g_given_e = defaultdict(float)
            eg_alignment_count = defaultdict(float)
            eg_alignment_count_for_any_i = defaultdict(float)
            count_e_given_g = defaultdict(float)
            count_any_e_given_g = defaultdict(float)
            ge_alignment_count = defaultdict(float)
            ge_alignment_count_for_any_j = defaultdict(float)
            for sentence in aligned_sents:
                g_sentence = sentence.words
                e_sentence = sentence.mots
                len_e = len(sentence.mots)
                len_g = len(sentence.words)
                eg_total = defaultdict(float)
                ge_total = defaultdict(float)
                # E step (a): compute normalization denominators.
                for j in range(len_g):
                    g = g_sentence[j]
                    for i in range(len_e):
                        e = e_sentence[i]
                        eg_total[g] += t[(g, e)] * q_eg[(i, j, len_e, len_g)]
                        ge_total[e] += t[(e, g)] * q_ge[(j, i, len_g, len_e)]
                # E step (b): collect fractional counts, averaging the two
                # directions' posteriors (the joint-training idea).
                for j in range(len_g):
                    g = g_sentence[j]
                    for i in range(len_e):
                        e = e_sentence[i]
                        eg_normalized = (t[(g, e)] * q_eg[(i, j, len_e, len_g)]) / eg_total[g]
                        ge_normalized = (t[(e, g)] * q_ge[(j, i, len_g, len_e)]) / ge_total[e]
                        avg_normalized = (eg_normalized + ge_normalized) / 2.0
                        count_g_given_e[(g, e)] += avg_normalized
                        count_any_g_given_e[e] += avg_normalized
                        eg_alignment_count[(i, j, len_e, len_g)] += avg_normalized
                        eg_alignment_count_for_any_i[(j, len_e, len_g)] += avg_normalized
                        count_e_given_g[(e, g)] += avg_normalized
                        count_any_e_given_g[g] += avg_normalized
                        ge_alignment_count[(j, i, len_g, len_e)] += avg_normalized
                        ge_alignment_count_for_any_j[(i, len_g, len_e)] += avg_normalized
            # ---- M step ----------------------------------------------------
            q = defaultdict(float)
            for sentence in aligned_sents:
                for e in sentence.mots:
                    for g in sentence.words:
                        # english --> german
                        t[(g, e)] = count_g_given_e[(g, e)] / count_any_g_given_e[e]
                        # german --> english
                        t[(e, g)] = count_e_given_g[(e, g)] / count_any_e_given_g[g]
                len_e = len(sentence.mots)
                len_g = len(sentence.words)
                for i in range(len_e):
                    for j in range(len_g):
                        # english --> german
                        q[(i, j, len_e, len_g)] = eg_alignment_count[(i, j, len_e, len_g)] / eg_alignment_count_for_any_i[(j, len_e, len_g)]
                        # german --> english
                        q[(j, i, len_g, len_e)] = ge_alignment_count[(j, i, len_g, len_e)] / ge_alignment_count_for_any_j[(i, len_g, len_e)]
        return (t, q)
def main(aligned_sents):
    """Train a Berkeley aligner on the given sentence pairs and report its
    average alignment error rate (AER)."""
    # Build the aligner with 10 EM iterations, then dump its alignments.
    aligner = BerkeleyAligner(aligned_sents, 10)
    A.save_model_output(aligned_sents, aligner, "ba.txt")

    # Score against the gold alignments of the first 50 sentence pairs.
    avg_aer = A.compute_avg_aer(aligned_sents, aligner, 50)

    print('Berkeley Aligner')
    print('---------------------------')
    print('Average AER: {0:.3f}\n'.format(avg_aer))
|
normal
|
{
"blob_id": "bf40b516e202af14469cd4012597ba412e663f56",
"index": 5898,
"step-1": "import nltk\nimport A\nfrom collections import defaultdict\nfrom nltk.align import Alignment, AlignedSent\n\nclass BerkeleyAligner():\n\n def __init__(self, align_sents, num_iter):\n\tself.t, self.q = self.train(align_sents, num_iter)\n\t\n # TODO: Computes the alignments for align_sent, using this model's parameters. Return\n # an AlignedSent object, with the sentence pair and the alignments computed.\n def align(self, align_sent):\n#\t#will return german --> english alignments\n \talignments = []\n german = align_sent.words\n english = align_sent.mots\n len_g = len(german)\n len_e = len(english)\n\n for j in range(len_g):\n\t\tg = german[j]\n\t\tbest_prob = (self.t[(g,None)] * self.q[(0,j,len_e,len_g)], None)\n\t\tbest_alignment_point = None\n\t\tfor i in range(len_e):\n \te = english[i]\n \t \t\tge_prob = (self.t[(e,g)]*self.q[(j,i,len_g,len_e)], i)\n\t\t\teg_prob = (self.t[(g,e)]*self.q[(i,j,len_e,len_g)], i)\n\t\t\tbest_prob = max(best_prob, ge_prob, eg_prob)\n\t\t\t\n\t\talignments.append((j, best_prob[1]))\n\n\treturn AlignedSent(align_sent.words, align_sent.mots, alignments)\n\n \n # TODO: Implement the EM algorithm. num_iters is the number of iterations. 
Returns the \n # translation and distortion parameters as a tuple.\n def train(self, aligned_sents, num_iters):\n\tMIN_PROB = 1.0e-12\n\t#INITIALIZATION\n\t#defining the vocabulary for each language:\n\t#german = words\n\t#english = mots\n\tg_vocab = set()\n\te_vocab = set()\n\tfor sentence in aligned_sents:\n\t\tg_vocab.update(sentence.words)\n\t\te_vocab.update(sentence.mots)\n\n\t# initializing translation table for english --> german and german --> english\n\tt = defaultdict(float)\n\tfor g in g_vocab:\n\t\tfor e in e_vocab:\n\t\t\tt[(g,e)] = 1.0 / float(len(g_vocab))\n\t\t\tt[(e,g)] = 1.0 / float(len(e_vocab))\n\t\n\t# initializing separate alignment tables for english --> german and german --> english\n\tq_eg = defaultdict(float)\n\tq_ge = defaultdict(float)\n\tfor sentence in aligned_sents:\n\t\tlen_e=len(sentence.mots)\n\t\tlen_g=len(sentence.words)\n\t\tfor i in range(len_e):\n\t\t\tfor j in range(len_g):\n\t\t\t\tq_eg[(i,j,len_e,len_g)] = 1.0 / float((len_e+1))\n\t\t\t\tq_ge[(j,i,len_g,len_e)] = 1.0 / float((len_g+1))\n\n\tprint 'Initialization complete'\n\t#INITIALIZATION COMPLETE\n\n\tfor i in range(num_iters):\n\t\tprint 'Iteration ' + str(i+1) + ' /' + str(num_iters)\n\t\t#E step\n\t\tcount_g_given_e = defaultdict(float)\n\t\tcount_any_g_given_e = defaultdict(float)\n\t\teg_alignment_count = defaultdict(float)\n\t\teg_alignment_count_for_any_i = defaultdict(float)\n\t\tcount_e_given_g = defaultdict(float)\n\t\tcount_any_e_given_g = defaultdict(float)\n\t\tge_alignment_count = defaultdict(float)\n\t\tge_alignment_count_for_any_j = defaultdict(float)\n\t\t\n\t\tfor sentence in aligned_sents:\n\t\t\tg_sentence = sentence.words\n\t\t\te_sentence = sentence.mots\n\t\t\tlen_e = len(sentence.mots)\n\t\t\tlen_g = len(sentence.words)\n\t\t\teg_total = defaultdict(float)\n\t\t\tge_total = defaultdict(float)\n\n\t\t\t#E step (a): compute normalization\n\t\t\tfor j in range(len_g):\n\t\t\t\t\n\t\t\t\tg = g_sentence[j]\n\t\n\t\t\t\tfor i in 
range(len_e):\n\t\t\t\t\t\n\t\t\t\t\te = e_sentence[i]\n\n\t\t\t\t\teg_count = (t[(g_sentence[j],e_sentence[i])] * q_eg[(i,j,len_e,len_g)])\n\t\t\t\t\teg_total[g] += eg_count\n\n\t\t\t\t\tge_count = (t[(e_sentence[i], g_sentence[j])] * q_ge[(j,i,len_g,len_e)])\n\t\t\t\t\tge_total[e] += ge_count \n\n\t\t\t# E step (b): collect fractional counts\n\t\t\tfor j in range(len_g):\n\t\t\t\t\n\t\t\t\tg = g_sentence[j]\n\n\t\t\t\tfor i in range(len_e):\n\t\t\t\t\t\n\t\t\t\t\te = e_sentence[i]\n\t\n\t\t\t\t\t#English --> German\n\t\t\t\t\teg_count = (t[(g_sentence[j],e_sentence[i])] * q_eg[(i,j,len_e,len_g)])\n\t\t\t\t\teg_normalized = eg_count / eg_total[g]\n\n\t\t\t\t\t#German --> English\n\t\t\t\t\tge_count = (t[(e_sentence[i], g_sentence[j])] * q_ge[(j,i,len_g,len_e)])\n\t\t\t\t\tge_normalized = ge_count / ge_total[e]\n\n\t\t\t\t\t#Averaging the probablities\n\t\t\t\t\tavg_normalized = (eg_normalized + ge_normalized) / 2.0\n\t\t\t\t\t#Storing counts\n\t\t\t\t\tcount_g_given_e[(g,e)] += avg_normalized\n\t\t\t\t\tcount_any_g_given_e[e] += avg_normalized\n\t\t\t\t\teg_alignment_count[(i,j,len_e,len_g)] += avg_normalized\n\t\t\t\t\teg_alignment_count_for_any_i[(j,len_e,len_g)] += avg_normalized\n\t\t\t\t\tcount_e_given_g[(e,g)] += avg_normalized\n\t\t\t\t\tcount_any_e_given_g[g] += avg_normalized\n\t\t\t\t\tge_alignment_count[(j,i,len_g,len_e)] += avg_normalized\n\t\t\t\t\tge_alignment_count_for_any_j[(i,len_g,len_e)] += avg_normalized\n\n\t\t#M step\n\t\tq = defaultdict(float)\n\t\tfor sentence in aligned_sents:\n\t\t\tfor e in sentence.mots:\n\t\t\t\tfor g in sentence.words:\n\t\t\t\t\t#eng --> germ\n\t\t\t\t\tt[(g,e)]= count_g_given_e[(g,e)] / count_any_g_given_e[e]\n\t\t\t\t\t#germ --> eng\n\t\t\t\t\tt[(e,g)]= count_e_given_g[(e,g)] / count_any_e_given_g[g]\n\n\t\t\tlen_e=len(sentence.mots)\n\t\t\tlen_g=len(sentence.words)\n\t\t\tfor i in range(len_e):\n\t\t\t\tfor j in range(len_g):\n\t\t\t\t\t#eng --> germ\n\t\t\t\t\tq[(i,j,len_e,len_g)] = 
eg_alignment_count[(i,j,len_e,len_g)] / eg_alignment_count_for_any_i[(j,len_e, len_g)]\n\t\t\t\t\t#germ --> eng\n\t\t\t\t\tq[(j,i,len_g,len_e)] = ge_alignment_count[(j,i,len_g,len_e)] / ge_alignment_count_for_any_j[(i,len_g,len_e)]\n\treturn (t,q)\n\n\ndef main(aligned_sents):\n ba = BerkeleyAligner(aligned_sents, 10)\n A.save_model_output(aligned_sents, ba, \"ba.txt\")\n avg_aer = A.compute_avg_aer(aligned_sents, ba, 50)\n\n print ('Berkeley Aligner')\n print ('---------------------------')\n print('Average AER: {0:.3f}\\n'.format(avg_aer))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Opens the file that the user specifies and prints each line prefixed with
# its 0-based index.
#
# FIX: the original open()/close() pairing leaked the file handle if
# readlines() raised; 'with' guarantees the file is closed either way.
filename = input("Please enter the name of the file that you wish to open.")
with open(filename, 'r') as fileopen:
    lines = fileopen.readlines()

# enumerate() replaces the original manual counter + while loop: it yields
# each index alongside the corresponding line.
for number, line in enumerate(lines):
    print(number, ".", line)
|
normal
|
{
"blob_id": "258b28153124ce42578c9eede429354069d8a7d6",
"index": 2869,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile number < count:\n print(number, '.', lines[number])\n number = number + 1\nfileopen.close()\n",
"step-3": "fileopen = open(input(\n 'Please enter the name of the file that you wish to open.'), 'r')\nlines = fileopen.readlines()\ncount = len(lines)\nnumber = 0\nwhile number < count:\n print(number, '.', lines[number])\n number = number + 1\nfileopen.close()\n",
"step-4": "#Opens the file that the user specifies\nfileopen = open(input(\"Please enter the name of the file that you wish to open.\"), 'r')\n\n#Reads the lines within the file and determines the length of the file\nlines = fileopen.readlines()\ncount = len(lines)\n\n#Count is how long the file is, so number is the index values basically.\n#As long as the number variable is less than the amount of lines in the file (because one must be subtracted since the index starts at 0) the \n#number will be printed in front of the lines found in the file.\nnumber = 0\nwhile number < count:\n print(number,\".\",lines[number])\n number = number + 1\nfileopen.close()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
if __name__ == '__main__':
    # Read the working-copy revision from a pre-1.7 Subversion
    # .svn/entries file (the revision number sits on the 4th line) and
    # either write it to a file or echo it to stdout.
    #
    # Usage: script.py [srcpath] [version_output_file]

    import sys
    import os.path

    # BUG FIX: the original guards were `len(sys.argv) >= 1` / `>= 2`, but
    # argv[0] is always the script name, so sys.argv[1] raised IndexError
    # whenever no argument was given. Correct guards are >= 2 and >= 3.
    srcpath = sys.argv[1] if len(sys.argv) >= 2 else './'
    verfn = sys.argv[2] if len(sys.argv) >= 3 else None

    try:
        with open(os.path.join(srcpath, '.svn/entries'), 'r') as fp:
            x = fp.read().splitlines()[3]

        if verfn:
            with open(verfn, 'w') as fp:
                fp.write(x)
        else:
            sys.stdout.write(x)

    except IOError:
        # Best effort: report and exit quietly if the entries file (or the
        # output file) is unavailable. Note: `except IOError:` replaces the
        # Python-2-only `except IOError, e` form (the bound name was unused);
        # this works on Python 2.6+ and 3.
        import traceback
        traceback.print_exc()
|
normal
|
{
"blob_id": "1ebf92cf40053e561b04a666eb1dd36f54999e2c",
"index": 7324,
"step-1": "\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n import sys\r\n import os.path\r\n \r\n srcpath = sys.argv[1] if len(sys.argv) >= 1 else './'\r\n verfn = sys.argv[2] if len(sys.argv) >= 2 else None\r\n \r\n try :\r\n \r\n with open(os.path.join(srcpath,'.svn/entries'),'r') as fp:\r\n x = fp.read().splitlines()[3]\r\n \r\n if verfn :\r\n with open(verfn,'w') as fp :\r\n fp.write(x)\r\n else :\r\n sys.stdout.write(x)\r\n \r\n except IOError, e :\r\n \r\n import traceback\r\n traceback.print_exc()\r\n \r\n pass\r\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Created on 01/10/18.
Author: morgan
Copyright defined in text_classification/LICENSE.txt
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class RNNClassifier(nn.Module):
    """Sentence classifier: frozen pre-trained embeddings -> 2-layer
    bidirectional RNN -> linear projection of the concatenated final
    hidden states to class logits.
    """

    def __init__(self, batch_size, num_classes, hidden_size, vocab_size, embed_size, weights):
        """
        Args:
            batch_size: nominal batch size (stored for reference; forward()
                reads the actual batch size from its input).
            num_classes: number of output classes.
            hidden_size: RNN hidden size per direction.
            vocab_size: size of the embedding vocabulary.
            embed_size: embedding dimension.
            weights: pre-trained embedding matrix (e.g. GloVe), shape
                [vocab_size, embed_size]; kept frozen during training.
        """
        super(RNNClassifier, self).__init__()
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        # Embedding lookup table, initialised from the pre-trained weights
        # and frozen (requires_grad=False).
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)
        # num_layers=2 and bidirectional=True -> 4 final hidden states, one
        # per (layer, direction) pair; hence the 4*hidden_size projection.
        self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2, bidirectional=True)
        self.proj = nn.Linear(4 * hidden_size, num_classes)

    def forward(self, input_sentence):
        """Compute class logits for a batch of token-id sentences.

        Args:
            input_sentence: LongTensor of token ids, [batch_size, seq_len].

        Returns:
            FloatTensor of logits, [batch_size, num_classes].
        """
        batch_size = input_sentence.size()[0]

        # [batch_size, seq_len] -> [seq_len, batch_size, embed_size]
        # (nn.RNN expects seq-first input). Renamed from `input`, which
        # shadowed the builtin.
        embedded = self.word_embeddings(input_sentence)
        embedded = embedded.permute(1, 0, 2).contiguous()

        # Initial hidden state, one slot per (layer, direction) pair.
        # FIX: allocate on the input's device instead of hard-coding
        # .cuda(), so the model also runs on CPU. The deprecated Variable
        # wrapper is dropped (plain tensors carry autograd state).
        h_0 = torch.zeros(4, batch_size, self.hidden_size,
                          device=embedded.device, dtype=embedded.dtype)

        # h_n: [4, batch_size, hidden_size]; output (per-step states) unused.
        output, h_n = self.rnn(embedded, h_0)

        # [4, batch, hidden] -> [batch, 4*hidden]: concatenate the final
        # states of all layers/directions per example.
        h_n = h_n.permute(1, 0, 2).contiguous()
        h_n = h_n.view(h_n.size(0), -1)

        logits = self.proj(h_n)  # [batch_size, num_classes]
        return logits
|
normal
|
{
"blob_id": "41417e3ce52edf6aee432886bbab6d16ec5bc88d",
"index": 164,
"step-1": "<mask token>\n\n\nclass RNNClassifier(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RNNClassifier(nn.Module):\n\n def __init__(self, batch_size, num_classes, hidden_size, vocab_size,\n embed_size, weights):\n super(RNNClassifier, self).__init__()\n self.batch_size = batch_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.word_embeddings = nn.Embedding(vocab_size, embed_size)\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False\n )\n self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,\n bidirectional=True)\n self.proj = nn.Linear(4 * hidden_size, num_classes)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass RNNClassifier(nn.Module):\n\n def __init__(self, batch_size, num_classes, hidden_size, vocab_size,\n embed_size, weights):\n super(RNNClassifier, self).__init__()\n self.batch_size = batch_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.word_embeddings = nn.Embedding(vocab_size, embed_size)\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False\n )\n self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,\n bidirectional=True)\n self.proj = nn.Linear(4 * hidden_size, num_classes)\n\n def forward(self, input_sentence):\n batch_size = input_sentence.size()[0]\n input = self.word_embeddings(input_sentence)\n input = input.permute(1, 0, 2).contiguous()\n h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())\n output, h_n = self.rnn(input, h_0)\n h_n = h_n.permute(1, 0, 2).contiguous()\n h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1] * h_n.size\n ()[2])\n logtis = self.proj(h_n)\n return logtis\n",
"step-4": "<mask token>\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nclass RNNClassifier(nn.Module):\n\n def __init__(self, batch_size, num_classes, hidden_size, vocab_size,\n embed_size, weights):\n super(RNNClassifier, self).__init__()\n self.batch_size = batch_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.word_embeddings = nn.Embedding(vocab_size, embed_size)\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False\n )\n self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,\n bidirectional=True)\n self.proj = nn.Linear(4 * hidden_size, num_classes)\n\n def forward(self, input_sentence):\n batch_size = input_sentence.size()[0]\n input = self.word_embeddings(input_sentence)\n input = input.permute(1, 0, 2).contiguous()\n h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())\n output, h_n = self.rnn(input, h_0)\n h_n = h_n.permute(1, 0, 2).contiguous()\n h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1] * h_n.size\n ()[2])\n logtis = self.proj(h_n)\n return logtis\n",
"step-5": "\"\"\"\nCreated on 01/10/18.\nAuthor: morgan\nCopyright defined in text_classification/LICENSE.txt\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nclass RNNClassifier(nn.Module):\n def __init__(self, batch_size, num_classes, hidden_size, vocab_size, embed_size, weights):\n super(RNNClassifier, self).__init__()\n # weights: Pre-trained GloVe word_embeddings that we will use to create our word_embedding lookup table\n self.batch_size = batch_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.word_embeddings = nn.Embedding(vocab_size, embed_size) # initialize the lookup table\n # Assigning the look-up table to the pre-trained GloVe word embedding.\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)\n self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2, bidirectional=True)\n self.proj = nn.Linear(4*hidden_size, num_classes)\n\n def forward(self, input_sentence):\n batch_size = input_sentence.size()[0]\n\n # input: [batch_size, seq_len], [64, 100]\n # print('input 0:', input_sentence.size())\n input = self.word_embeddings(input_sentence) # [batch_size, seq_len, embed_size]p\n # print('input 1:', input.size())\n input = input.permute(1, 0, 2).contiguous() # [seq_len, batch_size, embed_size]\n\n # Initiate hidden/cell state of the LSTM\n h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())\n # [4, batch_size, hidden_size]\n\n output, h_n = self.rnn(input, h_0)\n # h_n: [4, batch_size, hidden_size]\n # output: [max_len, batch_size, hidden]\n # print('h_n:', h_n.size())\n # print('output', output.size())\n h_n = h_n.permute(1, 0, 2).contiguous() #[batch_size, 4, hidden_size]\n # print('h_n1:', h_n.size())\n\n h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1]*h_n.size()[2])\n # [batch_size, 4*hidden_size]\n\n # print('h_n2:', h_n.size())\n # 
final_hidden_state: [1, batch_size, hidden_size]\n\n logtis = self.proj(h_n)\n # print('logtis:', logtis.size())\n # final_output: [batch_size, num_classes]\n\n return logtis\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
'''
Usage:
dep_tree.py [-h] [-v] [-p P] [-m component_map]
repos_root top_dir [top_depfile]
Parse design dependency tree and generate build scripts and other useful files
positional arguments:
repos_root repository root
top_dir top level design directory
top_depfile top level dep file
optional arguments:
-h, --help show this help message and exit
-v verbosity
-p P output product: x (xtclsh script); s (Modelsim script); c
(component list}; a (address table list); b (address
decoder script); f (flat file list)
-m component_map location of component map file
-D set or override script directives
default: nothing is done
---
Repository layout in each component / top-level area:
firmware/cfg: contains .dep files and project config files
firmware/hdl: contains source files
firmware/cgn: contains XCO core build files
/addr_table: contains uHAL address table XML files
---
.dep file format
# Comment line
common options:
-c component_name: look under different component to find referenced file
-d: descend a level in dir tree to find referenced file
-s dir: look in subdir path to find referenced file
include [dep_file_list]
default is to take file component_name.dep
setup [-z] [tcl_file_list]
default is to take file component_name.tcl
-z: coregen project configuration script
src [-l library] [-g] [-n] src_file_list
src_file_list under firmware/hdl by default; may contain glob patterns
-g: find 'generated' src in ipcore directory
-n: for XCO files, build but don't include
addrtab [-t] [file_list]
default is to reference file component_name.xml
-t: top-level address table file
---
component_map file format
logical_name physical_dir
The 'physical_dir' is relative to the trunk/
'''
from __future__ import print_function
import argparse
import sys
import os
import time
import glob
from dep_tree.DepFileParser import DepFileParser
from dep_tree.CommandLineParser import CommandLineParser
from dep_tree.Pathmaker import Pathmaker
from dep_tree.AddressTableGeneratorWriter import AddressTableGeneratorWriter
from dep_tree.AddressTableListWriter import AddressTableListWriter
from dep_tree.ComponentListWriter import ComponentListWriter
from dep_tree.IPCoreSimScriptWriter import IPCoreSimScriptWriter
from dep_tree.ModelsimScriptWriter import ModelsimScriptWriter
from dep_tree.SourceListWriter import SourceListWriter
from dep_tree.SourceListWriter2 import SourceListWriter2
from dep_tree.XtclshScriptWriter import XtclshScriptWriter
from dep_tree.VivadoScriptWriter import VivadoScriptWriter
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def main():
    """Parse the design dependency tree and emit the requested build product.

    Wires together the command-line parser, the path resolver and the dep
    file parser, then dispatches to the writer selected by the -p option.
    Always exits via SystemExit: 0 on success, -1 on writer failure, or with
    a message when the requested product has no registered handler.
    """

    #--------------------------------------------------------------
    # Set up the three objects which do the real hard work
    lCommandLineArgs = CommandLineParser().parse()
    lPathmaker = Pathmaker( lCommandLineArgs.root , lCommandLineArgs.top , lCommandLineArgs.componentmap , lCommandLineArgs.verbosity )
    lDepFileParser = DepFileParser( lCommandLineArgs , lPathmaker )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Map each commandline product flag to its writer class and check we
    # know how to handle the requested product
    lWriters = {
        "c":ComponentListWriter ,          # Output file lists
        "f":SourceListWriter ,             # Output file lists
        "f2":SourceListWriter2 ,           # Output file lists
        "a":AddressTableListWriter ,       # Output file lists
        "b":AddressTableGeneratorWriter ,  # Output address table generator file
        "s":ModelsimScriptWriter ,         # Output Modelsim script
        "ip":IPCoreSimScriptWriter ,       # Output IPSim script
        "x":XtclshScriptWriter ,           # Output xtclsh script
        "v":VivadoScriptWriter             # Output vivado script
    }

    if lCommandLineArgs.product not in lWriters:
        raise SystemExit( "No handler for product option '{0}' supplied".format( lCommandLineArgs.product ) )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Set the entrypoint for depfile parsing
    lTopFile = lPathmaker.getpath( lCommandLineArgs.top , "include" , lCommandLineArgs.dep )

    if lCommandLineArgs.verbosity > 0:
        print( "Top:" , lTopFile )

    # Parse the requested dep file
    lDepFileParser.parse( lTopFile , lCommandLineArgs.top )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Debug report of what was parsed.
    # FIX: use items() instead of the Python-2-only iteritems() -- the file
    # already imports print_function for py2/py3 compatibility, and items()
    # behaves identically here under both interpreters.
    if lCommandLineArgs.verbosity > 0:
        print( "-"*20 )
        for i,j in sorted( lDepFileParser.CommandList.items() ):
            print( i , ":" , len( j ) , "files" )
        print( "-"*20 )
        print( "Build settings:" )
        for i,j in sorted( lDepFileParser.ScriptVariables.items() ):
            print( " " , i , ":" , j )
        print( "-"*20 )

    if len( lDepFileParser.FilesNotFound ):
        print( "-"*20 )
        print( "Warning: Files not found" )
        for i in lDepFileParser.FilesNotFound:
            print ( ">" , i )
        print( "-"*20 )
    #--------------------------------------------------------------

    #--------------------------------------------------------------
    # Look up the Writer class for the requested product, instantiate it and
    # hand it everything the parser collected
    try:
        lWriters[lCommandLineArgs.product]( lCommandLineArgs , lPathmaker ).write( lDepFileParser.ScriptVariables , lDepFileParser.ComponentPaths , lDepFileParser.CommandList , lDepFileParser.Libs, lDepFileParser.Maps )
    except Exception as e:
        # sys is already imported at module level; only traceback is needed.
        import traceback
        traceback.print_exc(file=sys.stdout)
        print('ERROR:', e)
        raise SystemExit(-1)
    #--------------------------------------------------------------

    raise SystemExit(0)
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
normal
|
{
"blob_id": "ccfc78ae430f835244e0618afdeebe960c868415",
"index": 6126,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n lCommandLineArgs = CommandLineParser().parse()\n lPathmaker = Pathmaker(lCommandLineArgs.root, lCommandLineArgs.top,\n lCommandLineArgs.componentmap, lCommandLineArgs.verbosity)\n lDepFileParser = DepFileParser(lCommandLineArgs, lPathmaker)\n lWriters = {'c': ComponentListWriter, 'f': SourceListWriter, 'f2':\n SourceListWriter2, 'a': AddressTableListWriter, 'b':\n AddressTableGeneratorWriter, 's': ModelsimScriptWriter, 'ip':\n IPCoreSimScriptWriter, 'x': XtclshScriptWriter, 'v': VivadoScriptWriter\n }\n if lCommandLineArgs.product not in lWriters:\n raise SystemExit(\"No handler for product option '{0}' supplied\".\n format(lCommandLineArgs.product))\n lTopFile = lPathmaker.getpath(lCommandLineArgs.top, 'include',\n lCommandLineArgs.dep)\n if lCommandLineArgs.verbosity > 0:\n print('Top:', lTopFile)\n lDepFileParser.parse(lTopFile, lCommandLineArgs.top)\n if lCommandLineArgs.verbosity > 0:\n print('-' * 20)\n for i, j in sorted(lDepFileParser.CommandList.iteritems()):\n print(i, ':', len(j), 'files')\n print('-' * 20)\n print('Build settings:')\n for i, j in sorted(lDepFileParser.ScriptVariables.iteritems()):\n print(' ', i, ':', j)\n print('-' * 20)\n if len(lDepFileParser.FilesNotFound):\n print('-' * 20)\n print('Warning: Files not found')\n for i in lDepFileParser.FilesNotFound:\n print('>', i)\n print('-' * 20)\n try:\n lWriters[lCommandLineArgs.product](lCommandLineArgs, lPathmaker).write(\n lDepFileParser.ScriptVariables, lDepFileParser.ComponentPaths,\n lDepFileParser.CommandList, lDepFileParser.Libs, lDepFileParser\n .Maps)\n except Exception as e:\n import sys, traceback\n traceback.print_exc(file=sys.stdout)\n print('ERROR:', e)\n raise SystemExit(-1)\n raise SystemExit(0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n lCommandLineArgs = CommandLineParser().parse()\n lPathmaker = Pathmaker(lCommandLineArgs.root, lCommandLineArgs.top,\n lCommandLineArgs.componentmap, lCommandLineArgs.verbosity)\n lDepFileParser = DepFileParser(lCommandLineArgs, lPathmaker)\n lWriters = {'c': ComponentListWriter, 'f': SourceListWriter, 'f2':\n SourceListWriter2, 'a': AddressTableListWriter, 'b':\n AddressTableGeneratorWriter, 's': ModelsimScriptWriter, 'ip':\n IPCoreSimScriptWriter, 'x': XtclshScriptWriter, 'v': VivadoScriptWriter\n }\n if lCommandLineArgs.product not in lWriters:\n raise SystemExit(\"No handler for product option '{0}' supplied\".\n format(lCommandLineArgs.product))\n lTopFile = lPathmaker.getpath(lCommandLineArgs.top, 'include',\n lCommandLineArgs.dep)\n if lCommandLineArgs.verbosity > 0:\n print('Top:', lTopFile)\n lDepFileParser.parse(lTopFile, lCommandLineArgs.top)\n if lCommandLineArgs.verbosity > 0:\n print('-' * 20)\n for i, j in sorted(lDepFileParser.CommandList.iteritems()):\n print(i, ':', len(j), 'files')\n print('-' * 20)\n print('Build settings:')\n for i, j in sorted(lDepFileParser.ScriptVariables.iteritems()):\n print(' ', i, ':', j)\n print('-' * 20)\n if len(lDepFileParser.FilesNotFound):\n print('-' * 20)\n print('Warning: Files not found')\n for i in lDepFileParser.FilesNotFound:\n print('>', i)\n print('-' * 20)\n try:\n lWriters[lCommandLineArgs.product](lCommandLineArgs, lPathmaker).write(\n lDepFileParser.ScriptVariables, lDepFileParser.ComponentPaths,\n lDepFileParser.CommandList, lDepFileParser.Libs, lDepFileParser\n .Maps)\n except Exception as e:\n import sys, traceback\n traceback.print_exc(file=sys.stdout)\n print('ERROR:', e)\n raise SystemExit(-1)\n raise SystemExit(0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport argparse\nimport sys\nimport os\nimport time\nimport glob\nfrom dep_tree.DepFileParser import DepFileParser\nfrom dep_tree.CommandLineParser import CommandLineParser\nfrom dep_tree.Pathmaker import Pathmaker\nfrom dep_tree.AddressTableGeneratorWriter import AddressTableGeneratorWriter\nfrom dep_tree.AddressTableListWriter import AddressTableListWriter\nfrom dep_tree.ComponentListWriter import ComponentListWriter\nfrom dep_tree.IPCoreSimScriptWriter import IPCoreSimScriptWriter\nfrom dep_tree.ModelsimScriptWriter import ModelsimScriptWriter\nfrom dep_tree.SourceListWriter import SourceListWriter\nfrom dep_tree.SourceListWriter2 import SourceListWriter2\nfrom dep_tree.XtclshScriptWriter import XtclshScriptWriter\nfrom dep_tree.VivadoScriptWriter import VivadoScriptWriter\n\n\ndef main():\n lCommandLineArgs = CommandLineParser().parse()\n lPathmaker = Pathmaker(lCommandLineArgs.root, lCommandLineArgs.top,\n lCommandLineArgs.componentmap, lCommandLineArgs.verbosity)\n lDepFileParser = DepFileParser(lCommandLineArgs, lPathmaker)\n lWriters = {'c': ComponentListWriter, 'f': SourceListWriter, 'f2':\n SourceListWriter2, 'a': AddressTableListWriter, 'b':\n AddressTableGeneratorWriter, 's': ModelsimScriptWriter, 'ip':\n IPCoreSimScriptWriter, 'x': XtclshScriptWriter, 'v': VivadoScriptWriter\n }\n if lCommandLineArgs.product not in lWriters:\n raise SystemExit(\"No handler for product option '{0}' supplied\".\n format(lCommandLineArgs.product))\n lTopFile = lPathmaker.getpath(lCommandLineArgs.top, 'include',\n lCommandLineArgs.dep)\n if lCommandLineArgs.verbosity > 0:\n print('Top:', lTopFile)\n lDepFileParser.parse(lTopFile, lCommandLineArgs.top)\n if lCommandLineArgs.verbosity > 0:\n print('-' * 20)\n for i, j in sorted(lDepFileParser.CommandList.iteritems()):\n print(i, ':', len(j), 'files')\n print('-' * 20)\n print('Build settings:')\n for i, j in sorted(lDepFileParser.ScriptVariables.iteritems()):\n 
print(' ', i, ':', j)\n print('-' * 20)\n if len(lDepFileParser.FilesNotFound):\n print('-' * 20)\n print('Warning: Files not found')\n for i in lDepFileParser.FilesNotFound:\n print('>', i)\n print('-' * 20)\n try:\n lWriters[lCommandLineArgs.product](lCommandLineArgs, lPathmaker).write(\n lDepFileParser.ScriptVariables, lDepFileParser.ComponentPaths,\n lDepFileParser.CommandList, lDepFileParser.Libs, lDepFileParser\n .Maps)\n except Exception as e:\n import sys, traceback\n traceback.print_exc(file=sys.stdout)\n print('ERROR:', e)\n raise SystemExit(-1)\n raise SystemExit(0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\n'''\nUsage:\n\ndep_tree.py [-h] [-v] [-p P] [-m component_map]\n repos_root top_dir [top_depfile]\n\nParse design dependency tree and generate build scripts and other useful files\n\npositional arguments:\n repos_root repository root\n top_dir top level design directory\n top_depfile top level dep file\n\noptional arguments:\n -h, --help show this help message and exit\n -v verbosity\n -p P output product: x (xtclsh script); s (Modelsim script); c\n (component list}; a (address table list); b (address\n decoder script); f (flat file list)\n -m component_map location of component map file\n -D set or override script directives\n\n default: nothing is done\n\n---\n\nRepository layout in each component / top-level area:\n\nfirmware/cfg: contains .dep files and project config files\nfirmware/hdl: contains source files\nfirmware/cgn: contains XCO core build files\n/addr_table: contains uHAL address table XML files\n\n---\n\n.dep file format\n\n# Comment line\n\ncommon options:\n\n -c component_name: look under different component to find referenced file\n -d: descend a level in dir tree to find referenced file\n -s dir: look in subdir path to find referenced file\n\ninclude [dep_file_list]\n\n default is to take file component_name.dep\n\nsetup [-z] [tcl_file_list]\n\n default is to take file component_name.tcl\n -z: coregen project configuration script\n\nsrc [-l library] [-g] [-n] src_file_list\n\n src_file_list under firmware/hdl by default; may contain glob patterns\n -g: find 'generated' src in ipcore directory\n -n: for XCO files, build but don't include\n\naddrtab [-t] [file_list]\n\n default is to reference file component_name.xml\n -t: top-level address table file\n\n---\n\ncomponent_map file format\n\nlogical_name physical_dir\n\n The 'physical_dir' is relative to the trunk/\n\n'''\n\nfrom __future__ import print_function\nimport argparse\nimport sys\nimport os\nimport time\nimport glob\n\nfrom dep_tree.DepFileParser import 
DepFileParser\nfrom dep_tree.CommandLineParser import CommandLineParser\nfrom dep_tree.Pathmaker import Pathmaker\n\n\nfrom dep_tree.AddressTableGeneratorWriter import AddressTableGeneratorWriter\nfrom dep_tree.AddressTableListWriter import AddressTableListWriter\nfrom dep_tree.ComponentListWriter import ComponentListWriter\nfrom dep_tree.IPCoreSimScriptWriter import IPCoreSimScriptWriter\nfrom dep_tree.ModelsimScriptWriter import ModelsimScriptWriter\nfrom dep_tree.SourceListWriter import SourceListWriter\nfrom dep_tree.SourceListWriter2 import SourceListWriter2\nfrom dep_tree.XtclshScriptWriter import XtclshScriptWriter\nfrom dep_tree.VivadoScriptWriter import VivadoScriptWriter\n\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\ndef main():\n\n #--------------------------------------------------------------\n # Set up the three objects which do the real hardwork\n lCommandLineArgs = CommandLineParser().parse()\n lPathmaker = Pathmaker( lCommandLineArgs.root , lCommandLineArgs.top , lCommandLineArgs.componentmap , lCommandLineArgs.verbosity )\n lDepFileParser = DepFileParser( lCommandLineArgs , lPathmaker )\n #--------------------------------------------------------------\n\n #--------------------------------------------------------------\n # Assign the product handlers to the appropriate commandline flag and check we know how to handle the requested product\n lWriters = {\n \"c\":ComponentListWriter , # Output file lists\n \"f\":SourceListWriter , # Output file lists\n \"f2\":SourceListWriter2 , # Output file lists\n \"a\":AddressTableListWriter , # Output file lists\n \"b\":AddressTableGeneratorWriter , # Output address table generator file\n \"s\":ModelsimScriptWriter , # Output Modelsim script\n \"ip\":IPCoreSimScriptWriter , # Output IPSim script\n \"x\":XtclshScriptWriter , # Output 
xtclsh script\n \"v\":VivadoScriptWriter # Output vivado script\n }\n\n if lCommandLineArgs.product not in lWriters:\n raise SystemExit( \"No handler for product option '{0}' supplied\".format( lCommandLineArgs.product ) )\n #--------------------------------------------------------------\n\n #--------------------------------------------------------------\n # Set the entrypoint for depfile parsing\n lTopFile = lPathmaker.getpath( lCommandLineArgs.top , \"include\" , lCommandLineArgs.dep )\n #--------------------------------------------------------------\n\n #--------------------------------------------------------------\n # Debugging\n if lCommandLineArgs.verbosity > 0:\n print( \"Top:\" , lTopFile )\n #--------------------------------------------------------------\n\n #--------------------------------------------------------------\n # Parse the requested dep file\n lDepFileParser.parse( lTopFile , lCommandLineArgs.top )\n #--------------------------------------------------------------\n\n #--------------------------------------------------------------\n # Debugging\n if lCommandLineArgs.verbosity > 0:\n print( \"-\"*20 )\n for i,j in sorted( lDepFileParser.CommandList.iteritems() ):\n print( i , \":\" , len( j ) , \"files\" )\n print( \"-\"*20 )\n print( \"Build settings:\" )\n for i,j in sorted( lDepFileParser.ScriptVariables.iteritems() ):\n print( \" \" , i , \":\" , j )\n print( \"-\"*20 )\n\n if len( lDepFileParser.FilesNotFound ):\n print( \"-\"*20 )\n print( \"Warning: Files not found\" )\n for i in lDepFileParser.FilesNotFound:\n print ( \">\" , i )\n print( \"-\"*20 )\n #--------------------------------------------------------------\n\n #--------------------------------------------------------------\n # Look up the Writer object in the dictionary, create an object of that type and call the write function\n try:\n lWriters[lCommandLineArgs.product]( lCommandLineArgs , lPathmaker ).write( lDepFileParser.ScriptVariables , lDepFileParser.ComponentPaths , 
lDepFileParser.CommandList , lDepFileParser.Libs, lDepFileParser.Maps )\n except Exception as e:\n import sys, traceback\n traceback.print_exc(file=sys.stdout)\n print('ERROR:', e)\n raise SystemExit(-1)\n #--------------------------------------------------------------\n\n raise SystemExit(0)\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nif __name__ == '__main__':\n main()\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# encoding:utf-8
from selenium.webdriver.common.by import By
import random
import basePage
# Store stock-in button
stock_in = (By.XPATH, "//android.widget.TextView[contains(@text,'门店入库')]")
# Transfer stock-in button
transfer_in = (By.XPATH, "//android.widget.TextView[contains(@text,'调拨入库')]")
# Confirm-receipt button
take_receive = (By.ID, '%s:id/take_receive' % basePage.package_name)
# "View details" link
details_text = (By.ID, '%s:id/details_text' % basePage.package_name)
# State label of a transfer order awaiting sign-off.
# BUG FIX: the next three locators originally left the '%s' placeholder
# unfilled, so they could never match a real Android resource id.
transfer_confirm_state = (By.ID, '%s:id/state' % basePage.package_name)
# Dialog shown when signing off with quantity differences
transfer_diff_wizard = (By.ID, '%s:id/multiple_dialog_container' % basePage.package_name)
# Confirm-receipt button inside the dialog
text_confirm_button = (By.ID, '%s:id/text_confirm' % basePage.package_name)
# "Receive with differences" button
diff_confirm_button = (By.XPATH, "//android.widget.TextView[contains(@text,'差异收货')]")
# Order-state tab, picked at random (index 1-4) each time the module loads
state_num = random.randint(1, 4)
order_of_state = (By.XPATH, "//android.widget.TextView[%s]" % state_num)
# Order-state drop-down title
title = (By.ID, '%s:id/title' % basePage.package_name)
# Expand order details
fold_image = (By.ID, '%s:id/fold_image' % basePage.package_name)
# Advanced-search button
order_search = (By.ID, '%s:id/order_search' % basePage.package_name)
# Search submit
search_query = (By.ID, '%s:id/search_query' % basePage.package_name)
# Transfer-order number input
search_order_no = (By.ID, '%s:id/search_order_no' % basePage.package_name)
# SKU code input
search_order_sku = (By.ID, '%s:id/search_order_sku' % basePage.package_name)
# Shipping store/warehouse input
search_order_org = (By.ID, '%s:id/search_order_org' % basePage.package_name)
# Transfer type field
type_edit = (By.ID, '%s:id/type_edit' % basePage.package_name)
# Transfer-type picker option and submit button
transfer_options1 = (By.ID, '%s:id/options1' % basePage.package_name)
transfer_options_submit = (By.ID, '%s:id/btnSubmit' % basePage.package_name)
# Date-range check boxes
all_check = (By.ID, '%s:id/all_check' % basePage.package_name)
out_check = (By.ID, '%s:id/out_check' % basePage.package_name)
in_check = (By.ID, '%s:id/in_check' % basePage.package_name)
# Operator input
operate_edit = (By.ID, '%s:id/operate_edit' % basePage.package_name)
# Reset button
search_clear = (By.ID, '%s:id/search_clear' % basePage.package_name)
# Cancel button
search_up_cancel = (By.ID, '%s:id/search_up_cancel' % basePage.package_name)
# Transfer-order state ("completed")
order_state = (By.XPATH, "//android.widget.TextView[contains(@text,'已完成')]")
# Transfer-order number
allocate_name = (By.ID, '%s:id/allocate_name' % basePage.package_name)
# Advanced search: start date
start_at = (By.ID, '%s:id/start_at' % basePage.package_name)
# Advanced search: end date
end_at = (By.ID, '%s:id/end_at' % basePage.package_name)
# Advanced search: day picker
day = (By.ID, '%s:id/day' % basePage.package_name)

# --- H5 (embedded web view) locators ---
# "Show differences only" toggle
btn_view_diff = (By.CLASS_NAME, 'btn-view-diff')
# Search button
searchIcon = (By.ID, 'searchIcon')
# Search criteria input
input_item = (By.CLASS_NAME, 'input-item')
# Clear search contents
icon_delete = (By.XPATH, "//div[@class='keyboard']/div[1]/img[@class='icon-delete']")
# Back button
back_btn = (By.XPATH, "//div[@class='icon-back']/img[@alt='<']")
# Save button
btn_save = (By.CLASS_NAME, 'btn-save')
# Add manually
add_handle = (By.XPATH, "//div[@class='before-focus']/div[1]")
# Add by scanning a barcode
add_border_node = (By.XPATH, "//div[@class='before-focus']/div[2]")
# Import collected data
loggingimport = (By.XPATH, "//div[@class='before-focus']/div[3]")
# "More" button
btn_more = (By.CLASS_NAME, 'btn-more')
# Clear the list
btn_close_native = (By.CLASS_NAME, 'btn-close-native')
# Tap to edit the received quantity
icon_edit = (By.XPATH, "//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]")
# Received-quantity keypad: a random digit key (1-9), and the whole keypad
div_num = random.randint(1, 9)
num_key = (By.XPATH, "//div[@class='keyboard']/div[2]/div[%s]" % div_num)
num_keys = (By.XPATH, "//div[@class='keyboard']/div[2]")
# Confirm the edited quantity
key_confirm = (By.XPATH, "//div[@class='keyboard']/div[2]/div[12]")
# Order content row
result_item = (By.CLASS_NAME, 'result-item')
|
normal
|
{
"blob_id": "d1b025ddbf7d0ad48ff92a098d074820a3eb35ed",
"index": 6723,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nstock_in = By.XPATH, \"//android.widget.TextView[contains(@text,'门店入库')]\"\ntransfer_in = By.XPATH, \"//android.widget.TextView[contains(@text,'调拨入库')]\"\ntake_receive = By.ID, '%s:id/take_receive' % basePage.package_name\ndetails_text = By.ID, '%s:id/details_text' % basePage.package_name\ntransfer_confirm_state = By.ID, '%s:id/state'\ntransfer_diff_wizard = By.ID, '%s:id/multiple_dialog_container'\ntext_confirm_button = By.ID, '%s:id/text_confirm'\ndiff_confirm_button = (By.XPATH,\n \"//android.widget.TextView[contains(@text,'差异收货')]\")\nstate_num = random.randint(1, 4)\norder_of_state = By.XPATH, '//android.widget.TextView[%s]' % state_num\ntitle = By.ID, '%s:id/title' % basePage.package_name\nfold_image = By.ID, '%s:id/fold_image' % basePage.package_name\norder_search = By.ID, '%s:id/order_search' % basePage.package_name\nsearch_query = By.ID, '%s:id/search_query' % basePage.package_name\nsearch_order_no = By.ID, '%s:id/search_order_no' % basePage.package_name\nsearch_order_sku = By.ID, '%s:id/search_order_sku' % basePage.package_name\nsearch_order_org = By.ID, '%s:id/search_order_org' % basePage.package_name\ntype_edit = By.ID, '%s:id/type_edit' % basePage.package_name\ntransfer_options1 = By.ID, '%s:id/options1' % basePage.package_name\ntransfer_options_submit = By.ID, '%s:id/btnSubmit' % basePage.package_name\nall_check = By.ID, '%s:id/all_check' % basePage.package_name\nout_check = By.ID, '%s:id/out_check' % basePage.package_name\nin_check = By.ID, '%s:id/in_check' % basePage.package_name\noperate_edit = By.ID, '%s:id/operate_edit' % basePage.package_name\nsearch_clear = By.ID, '%s:id/search_clear' % basePage.package_name\nsearch_up_cancel = By.ID, '%s:id/search_up_cancel' % basePage.package_name\norder_state = By.XPATH, \"//android.widget.TextView[contains(@text,'已完成')]\"\nallocate_name = By.ID, '%s:id/allocate_name' % basePage.package_name\nstart_at = By.ID, '%s:id/start_at' % basePage.package_name\nend_at = By.ID, '%s:id/end_at' % 
basePage.package_name\nday = By.ID, '%s:id/day' % basePage.package_name\nbtn_view_diff = By.CLASS_NAME, 'btn-view-diff'\nsearchIcon = By.ID, 'searchIcon'\ninput_item = By.CLASS_NAME, 'input-item'\nicon_delete = (By.XPATH,\n \"//div[@class='keyboard']/div[1]/img[@class='icon-delete']\")\nback_btn = By.XPATH, \"//div[@class='icon-back']/img[@alt='<']\"\nbtn_save = By.CLASS_NAME, 'btn-save'\nadd_handle = By.XPATH, \"//div[@class='before-focus']/div[1]\"\nadd_border_node = By.XPATH, \"//div[@class='before-focus']/div[2]\"\nloggingimport = By.XPATH, \"//div[@class='before-focus']/div[3]\"\nbtn_more = By.CLASS_NAME, 'btn-more'\nbtn_close_native = By.CLASS_NAME, 'btn-close-native'\nicon_edit = (By.XPATH,\n \"//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]\"\n )\ndiv_num = random.randint(1, 9)\nnum_key = By.XPATH, \"//div[@class='keyboard']/div[2]/div[%s]\" % div_num\nnum_keys = By.XPATH, \"//div[@class='keyboard']/div[2]\"\nkey_confirm = By.XPATH, \"//div[@class='keyboard']/div[2]/div[12]\"\nresult_item = By.CLASS_NAME, 'result-item'\n",
"step-3": "from selenium.webdriver.common.by import By\nimport random\nimport basePage\nstock_in = By.XPATH, \"//android.widget.TextView[contains(@text,'门店入库')]\"\ntransfer_in = By.XPATH, \"//android.widget.TextView[contains(@text,'调拨入库')]\"\ntake_receive = By.ID, '%s:id/take_receive' % basePage.package_name\ndetails_text = By.ID, '%s:id/details_text' % basePage.package_name\ntransfer_confirm_state = By.ID, '%s:id/state'\ntransfer_diff_wizard = By.ID, '%s:id/multiple_dialog_container'\ntext_confirm_button = By.ID, '%s:id/text_confirm'\ndiff_confirm_button = (By.XPATH,\n \"//android.widget.TextView[contains(@text,'差异收货')]\")\nstate_num = random.randint(1, 4)\norder_of_state = By.XPATH, '//android.widget.TextView[%s]' % state_num\ntitle = By.ID, '%s:id/title' % basePage.package_name\nfold_image = By.ID, '%s:id/fold_image' % basePage.package_name\norder_search = By.ID, '%s:id/order_search' % basePage.package_name\nsearch_query = By.ID, '%s:id/search_query' % basePage.package_name\nsearch_order_no = By.ID, '%s:id/search_order_no' % basePage.package_name\nsearch_order_sku = By.ID, '%s:id/search_order_sku' % basePage.package_name\nsearch_order_org = By.ID, '%s:id/search_order_org' % basePage.package_name\ntype_edit = By.ID, '%s:id/type_edit' % basePage.package_name\ntransfer_options1 = By.ID, '%s:id/options1' % basePage.package_name\ntransfer_options_submit = By.ID, '%s:id/btnSubmit' % basePage.package_name\nall_check = By.ID, '%s:id/all_check' % basePage.package_name\nout_check = By.ID, '%s:id/out_check' % basePage.package_name\nin_check = By.ID, '%s:id/in_check' % basePage.package_name\noperate_edit = By.ID, '%s:id/operate_edit' % basePage.package_name\nsearch_clear = By.ID, '%s:id/search_clear' % basePage.package_name\nsearch_up_cancel = By.ID, '%s:id/search_up_cancel' % basePage.package_name\norder_state = By.XPATH, \"//android.widget.TextView[contains(@text,'已完成')]\"\nallocate_name = By.ID, '%s:id/allocate_name' % basePage.package_name\nstart_at = By.ID, 
'%s:id/start_at' % basePage.package_name\nend_at = By.ID, '%s:id/end_at' % basePage.package_name\nday = By.ID, '%s:id/day' % basePage.package_name\nbtn_view_diff = By.CLASS_NAME, 'btn-view-diff'\nsearchIcon = By.ID, 'searchIcon'\ninput_item = By.CLASS_NAME, 'input-item'\nicon_delete = (By.XPATH,\n \"//div[@class='keyboard']/div[1]/img[@class='icon-delete']\")\nback_btn = By.XPATH, \"//div[@class='icon-back']/img[@alt='<']\"\nbtn_save = By.CLASS_NAME, 'btn-save'\nadd_handle = By.XPATH, \"//div[@class='before-focus']/div[1]\"\nadd_border_node = By.XPATH, \"//div[@class='before-focus']/div[2]\"\nloggingimport = By.XPATH, \"//div[@class='before-focus']/div[3]\"\nbtn_more = By.CLASS_NAME, 'btn-more'\nbtn_close_native = By.CLASS_NAME, 'btn-close-native'\nicon_edit = (By.XPATH,\n \"//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]\"\n )\ndiv_num = random.randint(1, 9)\nnum_key = By.XPATH, \"//div[@class='keyboard']/div[2]/div[%s]\" % div_num\nnum_keys = By.XPATH, \"//div[@class='keyboard']/div[2]\"\nkey_confirm = By.XPATH, \"//div[@class='keyboard']/div[2]/div[12]\"\nresult_item = By.CLASS_NAME, 'result-item'\n",
"step-4": "#!/usr/bin/python\n# encoding:utf-8\nfrom selenium.webdriver.common.by import By\nimport random\nimport basePage\n\n# 门店入库button\nstock_in = (By.XPATH, \"//android.widget.TextView[contains(@text,'门店入库')]\")\n# 调拨入库button\ntransfer_in = (By.XPATH, \"//android.widget.TextView[contains(@text,'调拨入库')]\")\n# 确认签收button\ntake_receive = (By.ID, '%s:id/take_receive'%basePage.package_name)\n# 查看详情\ndetails_text = (By.ID, '%s:id/details_text'%basePage.package_name)\n\n# 调拨单代签收状态\ntransfer_confirm_state = (By.ID, \"%s:id/state\")\n\n# 差异签收弹出框\ntransfer_diff_wizard = (By.ID, \"%s:id/multiple_dialog_container\")\n# 确认签收按钮\ntext_confirm_button = (By.ID, \"%s:id/text_confirm\")\n# 差异收货button\ndiff_confirm_button = (By.XPATH, \"//android.widget.TextView[contains(@text,'差异收货')]\")\n\n# 订单状态\nstate_num = random.randint(1, 4)\norder_of_state = (By.XPATH, \"//android.widget.TextView[%s]\" % state_num)\n# 订单状态下拉\ntitle = (By.ID, '%s:id/title'%basePage.package_name)\n# 展开订单详情\nfold_image = (By.ID, '%s:id/fold_image'%basePage.package_name)\n\n# 高级搜索button\norder_search = (By.ID, '%s:id/order_search'%basePage.package_name)\n# 查询\nsearch_query = (By.ID, '%s:id/search_query'%basePage.package_name)\n# 调拨单号输入框\nsearch_order_no = (By.ID, '%s:id/search_order_no'%basePage.package_name)\n# 商品编码输入框\nsearch_order_sku = (By.ID, '%s:id/search_order_sku'%basePage.package_name)\n# 发货店仓输入框\nsearch_order_org = (By.ID, '%s:id/search_order_org'%basePage.package_name)\n# 调拨类型\ntype_edit = (By.ID, '%s:id/type_edit'%basePage.package_name)\n# 调拨类型option\ntransfer_options1 = (By.ID, '%s:id/options1'%basePage.package_name)\ntransfer_options_submit = (By.ID, '%s:id/btnSubmit'%basePage.package_name)\n\n# 日期范围\nall_check = (By.ID, '%s:id/all_check'%basePage.package_name)\nout_check = (By.ID, '%s:id/out_check'%basePage.package_name)\nin_check = (By.ID, '%s:id/in_check'%basePage.package_name)\n# 操作人输入框\noperate_edit = (By.ID, '%s:id/operate_edit'%basePage.package_name)\n# 重置\nsearch_clear = (By.ID, 
'%s:id/search_clear'%basePage.package_name)\n# 取消\nsearch_up_cancel = (By.ID, '%s:id/search_up_cancel'%basePage.package_name)\n# 调拨单状态\norder_state = (By.XPATH, \"//android.widget.TextView[contains(@text,'已完成')]\")\n# 调拨单号\nallocate_name = (By.ID, '%s:id/allocate_name'%basePage.package_name)\n# 高级搜索,选择开始日期\nstart_at = (By.ID, '%s:id/start_at'%basePage.package_name)\n# 高级搜索,选择结束日期\nend_at = (By.ID, '%s:id/end_at'%basePage.package_name)\n# 高级搜索,选择日\nday = (By.ID, '%s:id/day'%basePage.package_name)\n\n# H5定位\n# 只看差异\nbtn_view_diff = (By.CLASS_NAME, 'btn-view-diff')\n# 搜索button\nsearchIcon = (By.ID, 'searchIcon')\n# 搜索条件\ninput_item = (By.CLASS_NAME, 'input-item')\n# 清空搜索内容\nicon_delete = (By.XPATH, \"//div[@class='keyboard']/div[1]/img[@class='icon-delete']\")\n# 返回\nback_btn = (By.XPATH, \"//div[@class='icon-back']/img[@alt='<']\")\n# 保存\nbtn_save = (By.CLASS_NAME, 'btn-save')\n# 手工添加\nadd_handle = (By.XPATH, \"//div[@class='before-focus']/div[1]\")\n# 扫码添加\nadd_border_node = (By.XPATH, \"//div[@class='before-focus']/div[2]\")\n# 导入采集\nloggingimport = (By.XPATH, \"//div[@class='before-focus']/div[3]\")\n# 更多\nbtn_more = (By.CLASS_NAME, 'btn-more')\n# 清空列表\nbtn_close_native = (By.CLASS_NAME, 'btn-close-native')\n# 点击修改收货数量\nicon_edit = (By.XPATH, \"//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]\")\n# 填写收货数量\ndiv_num = random.randint(1,9)\nnum_key = (By.XPATH, \"//div[@class='keyboard']/div[2]/div[%s]\"%div_num)\nnum_keys = (By.XPATH, \"//div[@class='keyboard']/div[2]\")\n# 确认修改收货数量\nkey_confirm = (By.XPATH, \"//div[@class='keyboard']/div[2]/div[12]\")\n# 订单内容\nresult_item = (By.CLASS_NAME, 'result-item')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import logging
from exceptions.invalid_api_usage import InvalidAPIUsage
from wgadget.endpoints.ep import EP
class EPInfoLight(EP):
    """GET endpoint that reports the saved value of the light actuator."""

    NAME = 'info_light'
    URL = '/info'

    URL_ROUTE_PAR_PAYLOAD = '/'
    URL_ROUTE_PAR_URL = '/actuatorId/<actuatorId>'

    METHOD = 'GET'

    ATTR_ACTUATOR_ID = 'actuatorId'

    def __init__(self, web_gadget):
        self.web_gadget = web_gadget

    def getRequestDescriptionWithPayloadParameters(self):
        """Return a dict describing this endpoint's name, URL, method and
        the payload attributes it accepts."""
        ret = {}
        ret['name'] = EPInfoLight.NAME
        ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD
        ret['method'] = EPInfoLight.METHOD

        # NOTE(review): two dicts are allocated but only the first is filled;
        # presumably mirrors sibling endpoints -- confirm before changing.
        ret['payload-desc'] = [{}, {}]

        ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID
        ret['payload-desc'][0]['type'] = 'integer'
        ret['payload-desc'][0]['value'] = 1

        return ret

    def executeByParameters(self, actuatorId) -> dict:
        """URL-parameter entry point; wraps the id into a payload dict."""
        payload = {}
        payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)
        return self.executeByPayload(payload)

    def executeByPayload(self, payload) -> dict:
        """Return the saved light value and thread-controller status for the
        requested actuator.

        Raises InvalidAPIUsage (404) when the actuator id is unknown.
        """
        actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])

        if actuatorId == self.web_gadget.getLightId():

            actualValue = self.web_gadget.fetchSavedLightValue()

            logging.debug( "WEB request: {0} {1} ('{2}': {3})".format(
                EPInfoLight.METHOD, EPInfoLight.URL,
                EPInfoLight.ATTR_ACTUATOR_ID, actuatorId)
            )

            return {"value": actualValue, "thread": self.web_gadget.getThreadControllerStatus()}

        else:
            # BUG FIX: the original message referenced an undefined name
            # `value`, turning every unknown-actuator request into a
            # NameError instead of the intended 404 response.
            raise InvalidAPIUsage("No such actuator: {0}".format(actuatorId), error_code=404)
|
normal
|
{
"blob_id": "e5abab3f718bbbd25dcfc49290383203d53248c3",
"index": 9464,
"step-1": "<mask token>\n\n\nclass EPInfoLight(EP):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass EPInfoLight(EP):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getRequestDescriptionWithPayloadParameters(self):\n ret = {}\n ret['name'] = EPInfoLight.NAME\n ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD\n ret['method'] = EPInfoLight.METHOD\n ret['payload-desc'] = [{}, {}]\n ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID\n ret['payload-desc'][0]['type'] = 'integer'\n ret['payload-desc'][0]['value'] = 1\n return ret\n\n def executeByParameters(self, actuatorId) ->dict:\n payload = {}\n payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)\n return self.executeByPayload(payload)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass EPInfoLight(EP):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getRequestDescriptionWithPayloadParameters(self):\n ret = {}\n ret['name'] = EPInfoLight.NAME\n ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD\n ret['method'] = EPInfoLight.METHOD\n ret['payload-desc'] = [{}, {}]\n ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID\n ret['payload-desc'][0]['type'] = 'integer'\n ret['payload-desc'][0]['value'] = 1\n return ret\n\n def executeByParameters(self, actuatorId) ->dict:\n payload = {}\n payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)\n return self.executeByPayload(payload)\n\n def executeByPayload(self, payload) ->dict:\n actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])\n if actuatorId == self.web_gadget.getLightId():\n actualValue = self.web_gadget.fetchSavedLightValue()\n logging.debug(\"WEB request: {0} {1} ('{2}': {3})\".format(\n EPInfoLight.METHOD, EPInfoLight.URL, EPInfoLight.\n ATTR_ACTUATOR_ID, actuatorId))\n return {'value': actualValue, 'thread': self.web_gadget.\n getThreadControllerStatus()}\n else:\n raise InvalidAPIUsage('No such actuator: {0} or value: {1}'.\n format(actuatorId, value), error_code=404)\n",
"step-4": "<mask token>\n\n\nclass EPInfoLight(EP):\n NAME = 'info_light'\n URL = '/info'\n URL_ROUTE_PAR_PAYLOAD = '/'\n URL_ROUTE_PAR_URL = '/actuatorId/<actuatorId>'\n METHOD = 'GET'\n ATTR_ACTUATOR_ID = 'actuatorId'\n\n def __init__(self, web_gadget):\n self.web_gadget = web_gadget\n\n def getRequestDescriptionWithPayloadParameters(self):\n ret = {}\n ret['name'] = EPInfoLight.NAME\n ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD\n ret['method'] = EPInfoLight.METHOD\n ret['payload-desc'] = [{}, {}]\n ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID\n ret['payload-desc'][0]['type'] = 'integer'\n ret['payload-desc'][0]['value'] = 1\n return ret\n\n def executeByParameters(self, actuatorId) ->dict:\n payload = {}\n payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)\n return self.executeByPayload(payload)\n\n def executeByPayload(self, payload) ->dict:\n actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])\n if actuatorId == self.web_gadget.getLightId():\n actualValue = self.web_gadget.fetchSavedLightValue()\n logging.debug(\"WEB request: {0} {1} ('{2}': {3})\".format(\n EPInfoLight.METHOD, EPInfoLight.URL, EPInfoLight.\n ATTR_ACTUATOR_ID, actuatorId))\n return {'value': actualValue, 'thread': self.web_gadget.\n getThreadControllerStatus()}\n else:\n raise InvalidAPIUsage('No such actuator: {0} or value: {1}'.\n format(actuatorId, value), error_code=404)\n",
"step-5": "\nimport logging\nfrom exceptions.invalid_api_usage import InvalidAPIUsage\nfrom wgadget.endpoints.ep import EP\n\nclass EPInfoLight(EP):\n\n NAME = 'info_light'\n URL = '/info'\n\n URL_ROUTE_PAR_PAYLOAD = '/'\n URL_ROUTE_PAR_URL = '/actuatorId/<actuatorId>'\n\n METHOD = 'GET'\n\n ATTR_ACTUATOR_ID = 'actuatorId'\n\n def __init__(self, web_gadget):\n self.web_gadget = web_gadget\n\n def getRequestDescriptionWithPayloadParameters(self):\n\n ret = {}\n ret['name'] = EPInfoLight.NAME\n ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD\n ret['method'] = EPInfoLight.METHOD\n\n ret['payload-desc'] = [{},{}]\n\n ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID\n ret['payload-desc'][0]['type'] = 'integer'\n ret['payload-desc'][0]['value'] = 1\n\n return ret\n\n def executeByParameters(self, actuatorId) -> dict:\n payload = {}\n payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)\n return self.executeByPayload(payload)\n\n def executeByPayload(self, payload) -> dict:\n\n actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])\n\n if actuatorId == self.web_gadget.getLightId():\n\n actualValue = self.web_gadget.fetchSavedLightValue()\n\n logging.debug( \"WEB request: {0} {1} ('{2}': {3})\".format(\n EPInfoLight.METHOD, EPInfoLight.URL,\n EPInfoLight.ATTR_ACTUATOR_ID, actuatorId)\n )\n\n return {\"value\": actualValue, \"thread\": self.web_gadget.getThreadControllerStatus()}\n# return {\"value\": actualValue, \"thread\": {\"inProgress\": False, \"id\":1}}\n\n else:\n raise InvalidAPIUsage(\"No such actuator: {0} or value: {1}\".format(actuatorId, value), error_code=404)\n",
"step-ids": [
1,
3,
4,
6,
8
]
}
|
[
1,
3,
4,
6,
8
] |
# from mini_imagenet_dataloader import MiniImageNetDataLoader
import os
# Tolerate duplicate OpenMP runtimes -- a common torch/matplotlib clash;
# must be set before the libraries that load libomp are imported.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from tqdm import tqdm
import torch.nn.functional as F
from torchmeta.utils.gradient_based import gradient_update_parameters
from libs.models.maml_model import MetaConvModel
from libs.mini_objecta_dataLoader import FSDataLoader
def get_accuracy(logits, targets):
    """Return the fraction of rows in `logits` whose argmax equals `targets`."""
    predicted_classes = logits.argmax(dim=-1)
    return (predicted_classes == targets).float().mean()
def ModelConvMiniImagenet(out_features, hidden_size=84):
    """Build a MetaConvModel for 3-channel MiniImagenet-style inputs.

    The flattened feature size corresponds to a 5x5 spatial map with
    `hidden_size` channels.
    """
    flattened_features = 25 * hidden_size
    return MetaConvModel(3, out_features,
                         hidden_size=hidden_size,
                         feature_size=flattened_features)
if __name__ == "__main__":
classes_num = 5
model = ModelConvMiniImagenet(classes_num)
model.load_state_dict(torch.load('trained parameters/maml_miniimagenet_5shot_5way.th'))
model.zero_grad()
dataloader = FSDataLoader()
meta_optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
accuracy_l = list()
loss = nn.CrossEntropyLoss()
model.train()
num_of_tasks = 100
epochs = 1
with tqdm(dataloader, total=num_of_tasks) as qbar:
for idx, batch in enumerate(qbar):
model.zero_grad()
train_inputs, train_targets = batch['Train']
test_inputs, test_targets = batch['Test']
for _ in range(epochs):
for task_idx, (train_input, train_target, test_input,
test_target) in enumerate(zip(train_inputs, train_targets,
test_inputs, test_targets)):
outer_loss = torch.tensor(0., device='cuda')
accuracy = torch.tensor(0., device='cuda')
train_logit = model(train_input)
inner_loss = F.cross_entropy(train_logit, train_target)
params = gradient_update_parameters(model, inner_loss)
test_logit = model(test_input , params=params)
outer_loss += F.cross_entropy(test_logit, test_target)
with torch.no_grad():
accuracy += get_accuracy(test_logit, test_target)
outer_loss.div_(1)
accuracy.div_(1)
outer_loss.backward()
meta_optimizer.step()
accuracy_l.append(accuracy.item())
if idx > num_of_tasks-1:
break
plt.title('MAML miniobjectnet training (100 tasks)')
plt.xlabel('Tasks (1 epoch)')
plt.ylabel('Accuracy')
plt.plot(accuracy_l)
plt.show()
print(sum(accuracy_l) / len(accuracy_l))
|
normal
|
{
"blob_id": "e2a50fbd277ab868fbe71f9ff113a68a30b9f893",
"index": 2523,
"step-1": "<mask token>\n\n\ndef ModelConvMiniImagenet(out_features, hidden_size=84):\n return MetaConvModel(3, out_features, hidden_size=hidden_size,\n feature_size=5 * 5 * hidden_size)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_accuracy(logits, targets):\n _, predictions = torch.max(logits, dim=-1)\n return torch.mean(predictions.eq(targets).float())\n\n\ndef ModelConvMiniImagenet(out_features, hidden_size=84):\n return MetaConvModel(3, out_features, hidden_size=hidden_size,\n feature_size=5 * 5 * hidden_size)\n\n\n<mask token>\n",
"step-3": "<mask token>\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\n<mask token>\n\n\ndef get_accuracy(logits, targets):\n _, predictions = torch.max(logits, dim=-1)\n return torch.mean(predictions.eq(targets).float())\n\n\ndef ModelConvMiniImagenet(out_features, hidden_size=84):\n return MetaConvModel(3, out_features, hidden_size=hidden_size,\n feature_size=5 * 5 * hidden_size)\n\n\nif __name__ == '__main__':\n classes_num = 5\n model = ModelConvMiniImagenet(classes_num)\n model.load_state_dict(torch.load(\n 'trained parameters/maml_miniimagenet_5shot_5way.th'))\n model.zero_grad()\n dataloader = FSDataLoader()\n meta_optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n accuracy_l = list()\n loss = nn.CrossEntropyLoss()\n model.train()\n num_of_tasks = 100\n epochs = 1\n with tqdm(dataloader, total=num_of_tasks) as qbar:\n for idx, batch in enumerate(qbar):\n model.zero_grad()\n train_inputs, train_targets = batch['Train']\n test_inputs, test_targets = batch['Test']\n for _ in range(epochs):\n for task_idx, (train_input, train_target, test_input,\n test_target) in enumerate(zip(train_inputs,\n train_targets, test_inputs, test_targets)):\n outer_loss = torch.tensor(0.0, device='cuda')\n accuracy = torch.tensor(0.0, device='cuda')\n train_logit = model(train_input)\n inner_loss = F.cross_entropy(train_logit, train_target)\n params = gradient_update_parameters(model, inner_loss)\n test_logit = model(test_input, params=params)\n outer_loss += F.cross_entropy(test_logit, test_target)\n with torch.no_grad():\n accuracy += get_accuracy(test_logit, test_target)\n outer_loss.div_(1)\n accuracy.div_(1)\n outer_loss.backward()\n meta_optimizer.step()\n accuracy_l.append(accuracy.item())\n if idx > num_of_tasks - 1:\n break\n plt.title('MAML miniobjectnet training (100 tasks)')\n plt.xlabel('Tasks (1 epoch)')\n plt.ylabel('Accuracy')\n plt.plot(accuracy_l)\n plt.show()\n print(sum(accuracy_l) / len(accuracy_l))\n",
"step-4": "import os\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nfrom torchmeta.utils.gradient_based import gradient_update_parameters\nfrom libs.models.maml_model import MetaConvModel\nfrom libs.mini_objecta_dataLoader import FSDataLoader\n\n\ndef get_accuracy(logits, targets):\n _, predictions = torch.max(logits, dim=-1)\n return torch.mean(predictions.eq(targets).float())\n\n\ndef ModelConvMiniImagenet(out_features, hidden_size=84):\n return MetaConvModel(3, out_features, hidden_size=hidden_size,\n feature_size=5 * 5 * hidden_size)\n\n\nif __name__ == '__main__':\n classes_num = 5\n model = ModelConvMiniImagenet(classes_num)\n model.load_state_dict(torch.load(\n 'trained parameters/maml_miniimagenet_5shot_5way.th'))\n model.zero_grad()\n dataloader = FSDataLoader()\n meta_optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n accuracy_l = list()\n loss = nn.CrossEntropyLoss()\n model.train()\n num_of_tasks = 100\n epochs = 1\n with tqdm(dataloader, total=num_of_tasks) as qbar:\n for idx, batch in enumerate(qbar):\n model.zero_grad()\n train_inputs, train_targets = batch['Train']\n test_inputs, test_targets = batch['Test']\n for _ in range(epochs):\n for task_idx, (train_input, train_target, test_input,\n test_target) in enumerate(zip(train_inputs,\n train_targets, test_inputs, test_targets)):\n outer_loss = torch.tensor(0.0, device='cuda')\n accuracy = torch.tensor(0.0, device='cuda')\n train_logit = model(train_input)\n inner_loss = F.cross_entropy(train_logit, train_target)\n params = gradient_update_parameters(model, inner_loss)\n test_logit = model(test_input, params=params)\n outer_loss += F.cross_entropy(test_logit, test_target)\n with torch.no_grad():\n accuracy += get_accuracy(test_logit, test_target)\n outer_loss.div_(1)\n accuracy.div_(1)\n outer_loss.backward()\n meta_optimizer.step()\n 
accuracy_l.append(accuracy.item())\n if idx > num_of_tasks - 1:\n break\n plt.title('MAML miniobjectnet training (100 tasks)')\n plt.xlabel('Tasks (1 epoch)')\n plt.ylabel('Accuracy')\n plt.plot(accuracy_l)\n plt.show()\n print(sum(accuracy_l) / len(accuracy_l))\n",
"step-5": "# from mini_imagenet_dataloader import MiniImageNetDataLoader\nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nfrom torchmeta.utils.gradient_based import gradient_update_parameters\nfrom libs.models.maml_model import MetaConvModel\nfrom libs.mini_objecta_dataLoader import FSDataLoader\n\ndef get_accuracy(logits, targets):\n _, predictions = torch.max(logits, dim=-1)\n return torch.mean(predictions.eq(targets).float())\n\ndef ModelConvMiniImagenet(out_features, hidden_size=84):\n return MetaConvModel(3, out_features, hidden_size=hidden_size,\n feature_size=5 * 5 * hidden_size)\n\nif __name__ == \"__main__\":\n classes_num = 5\n model = ModelConvMiniImagenet(classes_num)\n model.load_state_dict(torch.load('trained parameters/maml_miniimagenet_5shot_5way.th'))\n model.zero_grad()\n\n dataloader = FSDataLoader()\n\n meta_optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n accuracy_l = list()\n loss = nn.CrossEntropyLoss()\n model.train()\n num_of_tasks = 100\n epochs = 1\n with tqdm(dataloader, total=num_of_tasks) as qbar:\n for idx, batch in enumerate(qbar):\n model.zero_grad()\n train_inputs, train_targets = batch['Train']\n test_inputs, test_targets = batch['Test']\n \n for _ in range(epochs):\n for task_idx, (train_input, train_target, test_input,\n test_target) in enumerate(zip(train_inputs, train_targets,\n test_inputs, test_targets)):\n outer_loss = torch.tensor(0., device='cuda')\n accuracy = torch.tensor(0., device='cuda')\n train_logit = model(train_input)\n inner_loss = F.cross_entropy(train_logit, train_target)\n \n params = gradient_update_parameters(model, inner_loss)\n\n test_logit = model(test_input , params=params)\n outer_loss += F.cross_entropy(test_logit, test_target)\n\n with torch.no_grad():\n accuracy += get_accuracy(test_logit, test_target)\n outer_loss.div_(1)\n accuracy.div_(1)\n\n 
outer_loss.backward()\n meta_optimizer.step()\n accuracy_l.append(accuracy.item())\n if idx > num_of_tasks-1:\n break\n plt.title('MAML miniobjectnet training (100 tasks)')\n plt.xlabel('Tasks (1 epoch)')\n plt.ylabel('Accuracy')\n plt.plot(accuracy_l)\n plt.show()\n print(sum(accuracy_l) / len(accuracy_l))\n \n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
import cv2
import numpy as np

# Two consecutive frames to compare: a windowed sum-of-squared-differences
# (SSD) map between them is written into img3 for each displacement tried.
img1 = cv2.imread('img0008.jpg')
img2 = cv2.imread('img0009.jpg')
img3 = np.zeros(img1.shape)
# Number of displacement offsets to evaluate (0..50).
# NOTE(review): the name shadows the builtin `iter`; renaming it would also
# require touching the driver loop below, so it is left as-is here.
iter = 51
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
    """Windowed sum of squared differences (SSD) between two images.

    Compares the window of half-size (wx, wy) centred at (ux, uy) in img_i
    against the same window displaced by (dx, dy) when sampling img_j.

    Parameters
    ----------
    ux, uy : int
        Centre pixel of the window.
    wx, wy : int
        Half-width / half-height of the window.
    dx, dy : int
        Displacement applied when sampling img_j.
    img_i, img_j : ndarray
        Images indexed as [row][col]; assumed to have the same shape.

    Returns
    -------
    suma : scalar or ndarray
        Sum over the window of (img_i - shifted img_j) ** 2; an array when
        the pixels carry a channel axis.
    """
    suma = 0
    for x in range(ux - wx, ux + wx):
        # Bug fix: the original never reset the inner coordinate between
        # rows, so every row after the first was skipped entirely.
        for y in range(uy - wy, uy + wy):
            # Clamp the displaced sample: fall back to the undisplaced pixel
            # when the offset would run past the bottom/right edge. Bounds
            # come from img_j itself (the image being sampled) rather than a
            # module-level global, so the function is self-contained.
            xdx = x + dx if x + dx < img_j.shape[0] else x
            ydy = y + dy if y + dy < img_j.shape[1] else y
            # NOTE(review): windows at the top/left border produce negative
            # x/y and wrap via numpy negative indexing, same as the original
            # code — confirm that is intended.
            suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
    return suma
def hazFuncion(iteracion):
    """Fill the global img3 with the windowed SSD between img1 and img2 for
    a diagonal displacement of (iteracion, iteracion) pixels."""
    rows = img1.shape[0] - 1
    cols = img1.shape[1] - 1
    for fila in range(rows):
        for col in range(cols):
            img3[fila][col] = sumas(fila, col, 1, 1,
                                    iteracion, iteracion, img1, img2)
# Driver: evaluate the SSD map for each displacement 0..iter-1 and display
# every 10th result in a resizable window (blocks until a key is pressed).
for x in range(iter):
    img3 = np.zeros(img1.shape)
    hazFuncion(x)
    if x % 10 == 0:
        # Alternatively persist to disk instead of showing:
        #cv2.imwrite("s"+str(x)+"xy.jpg", img3)
        cv2.namedWindow(str(x) + "dd.jpg", cv2.WINDOW_NORMAL)
        cv2.imshow(str(x) + "dd.jpg", img3)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "749e6a1f807843c9e2591f51561174cc51668b11",
"index": 1588,
"step-1": "<mask token>\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\n#img3 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0]-1):\n for y in range(img1.shape[1]-1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n #cv2.imwrite(\"s\"+str(x)+\"xy.jpg\", img3)\n cv2.namedWindow(str(x) + \"dd.jpg\", cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + \"dd.jpg\", img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import urllib.request
import urllib.error


def get_html(url):
    """
    Returns the html of url (as bytes) or None if the request does not
    succeed with status code 200
    """
    req = urllib.request.Request(
        url,
        headers={
            # Identify ourselves politely; some servers reject the default
            # Python user agent.
            'User-Agent': 'Python Learning Program',
            'From': '[email protected]'
        }
    )
    try:
        resp = urllib.request.urlopen(req)
    except urllib.error.HTTPError:
        # urlopen raises for non-2xx statuses instead of returning them, so
        # the documented "None if status code is not 200" contract was
        # previously unreachable for 4xx/5xx responses — honour it here.
        return None

    if resp.code == 200:
        return resp.read()  # returns the html document
    else:
        return None
|
normal
|
{
"blob_id": "4572e243f75ad92c04f5cdc0b454df7389183a6a",
"index": 3238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_html(url):\n \"\"\"\n Returns the html of url or None if status code is not 200\n \"\"\"\n req = urllib.request.Request(url, headers={'User-Agent':\n 'Python Learning Program', 'From': '[email protected]'})\n resp = urllib.request.urlopen(req)\n if resp.code == 200:\n return resp.read()\n else:\n return None\n",
"step-3": "import urllib.request\n\n\ndef get_html(url):\n \"\"\"\n Returns the html of url or None if status code is not 200\n \"\"\"\n req = urllib.request.Request(url, headers={'User-Agent':\n 'Python Learning Program', 'From': '[email protected]'})\n resp = urllib.request.urlopen(req)\n if resp.code == 200:\n return resp.read()\n else:\n return None\n",
"step-4": "import urllib.request\n\n\ndef get_html(url):\n \"\"\"\n Returns the html of url or None if status code is not 200\n \"\"\"\n req = urllib.request.Request(\n url,\n headers={\n 'User-Agent': 'Python Learning Program',\n 'From': '[email protected]'\n }\n )\n resp = urllib.request.urlopen(req)\n\n if resp.code == 200:\n return resp.read() # returns the html document\n else:\n return None\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
from .build_processing_chain import build_processing_chain
from collections import namedtuple
from pprint import pprint
def run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None, verbosity=0):
    """
    Run a single iteration of DSP on tb_data, optionally reducing the
    result to a scalar for optimization.

    Parameters:
    -----------
    tb_data : lh5 Table
        Input table of lh5 data. A selection is typically made before
        passing tb_data here: optimization usually need not see all data
    dsp_config : dict
        The DSP to perform for this iteration (see build_processing_chain())
        and the list of output variables for the output table
    db_dict : dict (optional)
        DSP parameters database. See build_processing_chain for formatting
    fom_function : function or None (optional)
        Given the output lh5 table of this iteration, must return a scalar
        figure-of-merit on which the optimization is based. Should accept
        verbosity as a second argument
    verbosity : int (optional)
        verbosity for the processing chain and fom_function calls

    Returns:
    --------
    figure_of_merit : float
        Figure-of-merit value for the iteration, when fom_function is given
    tb_out : lh5 Table
        The output lh5 table for the iteration, when fom_function is None
    """
    chain, _, tb_out = build_processing_chain(
        tb_data, dsp_config, db_dict=db_dict, verbosity=verbosity)
    chain.execute()
    if fom_function is None:
        return tb_out
    return fom_function(tb_out, verbosity)
# One varied dsp parameter: processor name, argument index (or list of
# indices), the strings to sweep that argument over, and an optional list of
# companion (name, i_arg, value_strs) tuples that must change in lockstep.
ParGridDimension = namedtuple('ParGridDimension', 'name i_arg value_strs companions')

class ParGrid():
    """ Parameter Grid class

    Each ParGrid entry corresponds to a dsp parameter to be varied.
    The ntuples must follow the pattern:
    ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )
    where name is the name of the dsp routine in dsp_config to be optimized,
    i_arg is the index of the argument to be varied, value_strs is the array
    of strings to set the argument to, and companions is an optional list of
    ( name, i_arg, value_strs ) tuples for companion arguments that need to
    change along with this one

    Optionally, i_arg can be a list of the argument indices to be varied
    together, where value_strs is a list of lists corresponding to the
    strings to set the arguments to in the same order.
    """
    def __init__(self):
        self.dims = []

    def add_dimension(self, name, i_arg, value_strs, companions=None):
        """Append one parameter dimension to the grid."""
        self.dims.append( ParGridDimension(name, i_arg, value_strs, companions) )

    def get_n_dimensions(self):
        """Number of parameters being varied."""
        return len(self.dims)

    def get_n_points_of_dim(self, i):
        """Number of values swept for dimension i."""
        return len(self.dims[i].value_strs)

    def get_shape(self):
        """Tuple of per-dimension point counts, in dimension order."""
        shape = ()
        for i in range(self.get_n_dimensions()):
            shape += (self.get_n_points_of_dim(i),)
        return shape

    def get_n_grid_points(self):
        """Total number of grid points (product of the shape)."""
        return np.prod(self.get_shape())

    def get_par_meshgrid(self, copy=False, sparse=False):
        """ return a meshgrid of parameter values

        Always uses Matrix indexing (natural for par grid) so that
        mg[i1][i2][...] corresponds to index order in self.dims

        Note copy is False by default as opposed to numpy default of True
        """
        axes = []
        for i in range(self.get_n_dimensions()):
            # Bug fix: field is value_strs (was the nonexistent values_strs)
            axes.append(self.dims[i].value_strs)
        # Bug fix: copy/sparse must be passed by keyword; positionally they
        # would be consumed by np.meshgrid as extra coordinate arrays
        return np.meshgrid(*axes, copy=copy, sparse=sparse, indexing='ij')

    def get_zero_indices(self):
        """An all-zeros index vector suitable for iterate_indices()."""
        return np.zeros(self.get_n_dimensions(), dtype=np.uint32)

    def iterate_indices(self, indices):
        """ iterate given indices [i1, i2, ...] by one.

        For easier iteration. The convention here is arbitrary, but its the
        order the arrays would be traversed in a series of nested for loops
        in the order appearing in dims (first dimension is first for loop):

        Return False when the grid runs out of indices. Otherwise returns True.
        """
        for iD in reversed(range(self.get_n_dimensions())):
            indices[iD] += 1
            if indices[iD] < self.get_n_points_of_dim(iD): return True
            indices[iD] = 0
        return False

    def get_data(self, i_dim, i_par):
        """Unpack (name, i_arg, value_str, companions) for point i_par of
        dimension i_dim."""
        name = self.dims[i_dim].name
        i_arg = self.dims[i_dim].i_arg
        value_str = self.dims[i_dim].value_strs[i_par]
        companions = self.dims[i_dim].companions
        return name, i_arg, value_str, companions

    def print_data(self, indices):
        """Print the parameter values at the given grid indices."""
        print(f"Grid point at indices {indices}:")
        for i_dim, i_par in enumerate(indices):
            name, i_arg, value_str, _ = self.get_data(i_dim, i_par)
            print(f"{name}[{i_arg}] = {value_str}")

    def set_dsp_pars(self, dsp_config, indices):
        """Write the parameter values at the given grid indices into
        dsp_config (in place), including companion arguments."""
        for i_dim, i_par in enumerate(indices):
            name, i_arg, value_str, companions = self.get_data(i_dim, i_par)
            proc = dsp_config['processors'][name]
            # Processors with 'init_args' take their sweep values there;
            # everything else is set via 'args'.
            key = 'init_args' if proc.get('init_args') is not None else 'args'
            if np.isscalar(i_arg):
                proc[key][i_arg] = value_str
            else:
                # i_arg is a list of indices varied together
                for i in range(len(i_arg)):
                    proc[key][i_arg[i]] = value_str[i]
            if companions is None: continue
            # Companions are written under the same key as the primary
            # processor (preserves the original behavior).
            for ( c_name, c_i_arg, c_value_str ) in companions:
                dsp_config['processors'][c_name][key][c_i_arg] = c_value_str[i_par]
def run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64, db_dict=None, verbosity=0):
    """Extract a table of optimization values for a grid of DSP parameters

    The grid argument defines a list of parameters and values over which to
    run the DSP defined in dsp_config on tb_data. A scalar figure-of-merit
    is extracted at each point.

    Parameters:
    -----------
    tb_data : lh5 Table
        Input table of lh5 data. A selection is typically made before
        passing tb_data here: optimization usually need not see all data
    dsp_config : dict
        The DSP to perform (see build_processing_chain()) and the list of
        output variables for the output table at each grid point
    grid : ParGrid
        See ParGrid class for format
    fom_function : function
        Given the output lh5 table of a DSP iteration, must return a scalar
        figure-of-merit. Should accept verbosity as a second keyword argument
    dtype : dtype (optional)
        Data type of fom_function's return object. Should be np.ndarray if
        fom_function is set to None
    db_dict : dict (optional)
        DSP parameters database. See build_processing_chain for formatting
    verbosity : int (optional)
        Verbosity for the processing chain and fom_function calls

    Returns:
    --------
    grid_values : ndarray of floats
        N-dimensional numpy ndarray whose Mth axis corresponds to the Mth
        row of the grid argument
    """
    results = np.ndarray(shape=grid.get_shape(), dtype=dtype)
    indices = grid.get_zero_indices()
    if verbosity > 0:
        print("Starting grid calculations...")
    keep_going = True
    while keep_going:
        grid.set_dsp_pars(dsp_config, indices)
        if verbosity > 1:
            pprint(dsp_config)
        if verbosity > 0:
            grid.print_data(indices)
        results[tuple(indices)] = run_one_dsp(
            tb_data, dsp_config, db_dict=db_dict,
            fom_function=fom_function, verbosity=verbosity)
        if verbosity > 0:
            print('Value:', results[tuple(indices)])
        keep_going = grid.iterate_indices(indices)
    return results
|
normal
|
{
"blob_id": "efe2d6f5da36679b77de32d631cca50c2c1dd29e",
"index": 5170,
"step-1": "<mask token>\n\n\nclass ParGrid:\n <mask token>\n\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))\n\n def get_n_dimensions(self):\n return len(self.dims)\n <mask token>\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += self.get_n_points_of_dim(i),\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\"\n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n <mask token>\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f'Grid point at indices {indices}:')\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f'{name}[{i_arg}] = {value_str}')\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg\n ] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n 
continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg\n ] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg\n ] = c_value_str[i_par]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ParGrid:\n \"\"\" Parameter Grid class\n\n Each ParGrid entry corresponds to a dsp parameter to be varied.\n The ntuples must follow the pattern: \n ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )\n where name is the name of the dsp routine in dsp_config whose to be\n optimized, i_arg is the index of the argument to be varied, value_strs is\n the array of strings to set the argument to, and companions is an optional\n list of ( name, i_arg, value_strs ) tuples for companion arguments that\n need to change along with this one\n \n Optionally, i_arg can be a list of the argument indices to be varied together,\n where value_strs is a list of lists correponding to the strings to set the\n arguments to in the same order.\n \"\"\"\n\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))\n\n def get_n_dimensions(self):\n return len(self.dims)\n\n def get_n_points_of_dim(self, i):\n return len(self.dims[i].value_strs)\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += self.get_n_points_of_dim(i),\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\"\n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n\n def iterate_indices(self, indices):\n \"\"\" iterate given indices [i1, i2, ...] by one.\n\n For easier iteration. 
The convention here is arbitrary, but its the\n order the arrays would be traversed in a series of nested for loops in\n the order appearin in dims (first dimension is first for loop, etc):\n\n Return False when the grid runs out of indices. Otherwise returns True.\n \"\"\"\n for iD in reversed(range(self.get_n_dimensions())):\n indices[iD] += 1\n if indices[iD] < self.get_n_points_of_dim(iD):\n return True\n indices[iD] = 0\n return False\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f'Grid point at indices {indices}:')\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f'{name}[{i_arg}] = {value_str}')\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg\n ] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg\n ] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg\n ] = c_value_str[i_par]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None,\n verbosity=0):\n \"\"\"\n Run one iteration of DSP on tb_data \n\n Optionally returns a value for optimization\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed for this iteration (see\n build_processing_chain()) and the list of output variables to appear in\n the output table\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n fom_function : function or None (optional)\n When given the output lh5 table of this DSP iteration, the\n fom_function must return a scalar figure-of-merit value upon which the\n optimization will be based. Should accept verbosity as a second argument\n verbosity : int (optional)\n verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n figure_of_merit : float\n If fom_function is not None, returns figure-of-merit value for the DSP iteration\n tb_out : lh5 Table\n If fom_function is None, returns the output lh5 table for the DSP iteration\n \"\"\"\n pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=\n db_dict, verbosity=verbosity)\n pc.execute()\n if fom_function is not None:\n return fom_function(tb_out, verbosity)\n else:\n return tb_out\n\n\n<mask token>\n\n\nclass ParGrid:\n \"\"\" Parameter Grid class\n\n Each ParGrid entry corresponds to a dsp parameter to be varied.\n The ntuples must follow the pattern: \n ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )\n where name is the name of the dsp routine in dsp_config whose to be\n optimized, i_arg is the index of the argument to be varied, value_strs is\n the array of strings to set the argument to, and companions is an optional\n list of ( 
name, i_arg, value_strs ) tuples for companion arguments that\n need to change along with this one\n \n Optionally, i_arg can be a list of the argument indices to be varied together,\n where value_strs is a list of lists correponding to the strings to set the\n arguments to in the same order.\n \"\"\"\n\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))\n\n def get_n_dimensions(self):\n return len(self.dims)\n\n def get_n_points_of_dim(self, i):\n return len(self.dims[i].value_strs)\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += self.get_n_points_of_dim(i),\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\"\n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n\n def iterate_indices(self, indices):\n \"\"\" iterate given indices [i1, i2, ...] by one.\n\n For easier iteration. The convention here is arbitrary, but its the\n order the arrays would be traversed in a series of nested for loops in\n the order appearin in dims (first dimension is first for loop, etc):\n\n Return False when the grid runs out of indices. 
Otherwise returns True.\n \"\"\"\n for iD in reversed(range(self.get_n_dimensions())):\n indices[iD] += 1\n if indices[iD] < self.get_n_points_of_dim(iD):\n return True\n indices[iD] = 0\n return False\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f'Grid point at indices {indices}:')\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f'{name}[{i_arg}] = {value_str}')\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg\n ] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg\n ] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg\n ] = c_value_str[i_par]\n\n\ndef run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64,\n db_dict=None, verbosity=0):\n \"\"\"Extract a table of optimization values for a grid of DSP parameters \n\n The grid argument defines a list of parameters and values over which to run\n the DSP defined in dsp_config on tb_data. 
At each point, a scalar\n figure-of-merit is extracted\n\n Returns a N-dimensional ndarray of figure-of-merit values, where the array\n axes are in the order they appear in grid.\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed (see build_processing_chain()) and the\n list of output variables to appear in the output table for each grid point\n grid : ParGrid\n See ParGrid class for format\n fom_function : function \n When given the output lh5 table of this DSP iteration, the fom_function\n must return a scalar figure-of-merit. Should accept verbosity as a\n second keyword argument\n dtype : dtype (optional)\n The data type of the fom_function's return object. Should be np.ndarray if\n fom_function is set to None\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n verbosity : int (optional)\n Verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n grid_values : ndarray of floats\n An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row\n of the grid argument\n \"\"\"\n grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)\n iii = grid.get_zero_indices()\n if verbosity > 0:\n print('Starting grid calculations...')\n while True:\n grid.set_dsp_pars(dsp_config, iii)\n if verbosity > 1:\n pprint(dsp_config)\n if verbosity > 0:\n grid.print_data(iii)\n grid_values[tuple(iii)] = run_one_dsp(tb_data, dsp_config, db_dict=\n db_dict, fom_function=fom_function, verbosity=verbosity)\n if verbosity > 0:\n print('Value:', grid_values[tuple(iii)])\n if not grid.iterate_indices(iii):\n break\n return grid_values\n",
"step-4": "import numpy as np\nfrom .build_processing_chain import build_processing_chain\nfrom collections import namedtuple\nfrom pprint import pprint\n\n\ndef run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None,\n verbosity=0):\n \"\"\"\n Run one iteration of DSP on tb_data \n\n Optionally returns a value for optimization\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed for this iteration (see\n build_processing_chain()) and the list of output variables to appear in\n the output table\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n fom_function : function or None (optional)\n When given the output lh5 table of this DSP iteration, the\n fom_function must return a scalar figure-of-merit value upon which the\n optimization will be based. 
Should accept verbosity as a second argument\n verbosity : int (optional)\n verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n figure_of_merit : float\n If fom_function is not None, returns figure-of-merit value for the DSP iteration\n tb_out : lh5 Table\n If fom_function is None, returns the output lh5 table for the DSP iteration\n \"\"\"\n pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=\n db_dict, verbosity=verbosity)\n pc.execute()\n if fom_function is not None:\n return fom_function(tb_out, verbosity)\n else:\n return tb_out\n\n\nParGridDimension = namedtuple('ParGridDimension',\n 'name i_arg value_strs companions')\n\n\nclass ParGrid:\n \"\"\" Parameter Grid class\n\n Each ParGrid entry corresponds to a dsp parameter to be varied.\n The ntuples must follow the pattern: \n ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )\n where name is the name of the dsp routine in dsp_config whose to be\n optimized, i_arg is the index of the argument to be varied, value_strs is\n the array of strings to set the argument to, and companions is an optional\n list of ( name, i_arg, value_strs ) tuples for companion arguments that\n need to change along with this one\n \n Optionally, i_arg can be a list of the argument indices to be varied together,\n where value_strs is a list of lists correponding to the strings to set the\n arguments to in the same order.\n \"\"\"\n\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))\n\n def get_n_dimensions(self):\n return len(self.dims)\n\n def get_n_points_of_dim(self, i):\n return len(self.dims[i].value_strs)\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += self.get_n_points_of_dim(i),\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def 
get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\"\n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n\n def iterate_indices(self, indices):\n \"\"\" iterate given indices [i1, i2, ...] by one.\n\n For easier iteration. The convention here is arbitrary, but its the\n order the arrays would be traversed in a series of nested for loops in\n the order appearin in dims (first dimension is first for loop, etc):\n\n Return False when the grid runs out of indices. Otherwise returns True.\n \"\"\"\n for iD in reversed(range(self.get_n_dimensions())):\n indices[iD] += 1\n if indices[iD] < self.get_n_points_of_dim(iD):\n return True\n indices[iD] = 0\n return False\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f'Grid point at indices {indices}:')\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f'{name}[{i_arg}] = {value_str}')\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg\n ] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]\n ] = value_str[i]\n if 
companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg\n ] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg\n ] = c_value_str[i_par]\n\n\ndef run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64,\n db_dict=None, verbosity=0):\n \"\"\"Extract a table of optimization values for a grid of DSP parameters \n\n The grid argument defines a list of parameters and values over which to run\n the DSP defined in dsp_config on tb_data. At each point, a scalar\n figure-of-merit is extracted\n\n Returns a N-dimensional ndarray of figure-of-merit values, where the array\n axes are in the order they appear in grid.\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed (see build_processing_chain()) and the\n list of output variables to appear in the output table for each grid point\n grid : ParGrid\n See ParGrid class for format\n fom_function : function \n When given the output lh5 table of this DSP iteration, the fom_function\n must return a scalar figure-of-merit. Should accept verbosity as a\n second keyword argument\n dtype : dtype (optional)\n The data type of the fom_function's return object. Should be np.ndarray if\n fom_function is set to None\n db_dict : dict (optional)\n DSP parameters database. 
See build_processing_chain for formatting info\n verbosity : int (optional)\n Verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n grid_values : ndarray of floats\n An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row\n of the grid argument\n \"\"\"\n grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)\n iii = grid.get_zero_indices()\n if verbosity > 0:\n print('Starting grid calculations...')\n while True:\n grid.set_dsp_pars(dsp_config, iii)\n if verbosity > 1:\n pprint(dsp_config)\n if verbosity > 0:\n grid.print_data(iii)\n grid_values[tuple(iii)] = run_one_dsp(tb_data, dsp_config, db_dict=\n db_dict, fom_function=fom_function, verbosity=verbosity)\n if verbosity > 0:\n print('Value:', grid_values[tuple(iii)])\n if not grid.iterate_indices(iii):\n break\n return grid_values\n",
"step-5": "import numpy as np\nfrom .build_processing_chain import build_processing_chain\nfrom collections import namedtuple\nfrom pprint import pprint\n\n\ndef run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None, verbosity=0):\n \"\"\"\n Run one iteration of DSP on tb_data \n\n Optionally returns a value for optimization\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed for this iteration (see\n build_processing_chain()) and the list of output variables to appear in\n the output table\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n fom_function : function or None (optional)\n When given the output lh5 table of this DSP iteration, the\n fom_function must return a scalar figure-of-merit value upon which the\n optimization will be based. 
Should accept verbosity as a second argument\n verbosity : int (optional)\n verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n figure_of_merit : float\n If fom_function is not None, returns figure-of-merit value for the DSP iteration\n tb_out : lh5 Table\n If fom_function is None, returns the output lh5 table for the DSP iteration\n \"\"\"\n \n pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=db_dict, verbosity=verbosity)\n pc.execute()\n if fom_function is not None: return fom_function(tb_out, verbosity)\n else: return tb_out\n\n\n\nParGridDimension = namedtuple('ParGridDimension', 'name i_arg value_strs companions')\n\nclass ParGrid():\n \"\"\" Parameter Grid class\n\n Each ParGrid entry corresponds to a dsp parameter to be varied.\n The ntuples must follow the pattern: \n ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )\n where name is the name of the dsp routine in dsp_config whose to be\n optimized, i_arg is the index of the argument to be varied, value_strs is\n the array of strings to set the argument to, and companions is an optional\n list of ( name, i_arg, value_strs ) tuples for companion arguments that\n need to change along with this one\n \n Optionally, i_arg can be a list of the argument indices to be varied together,\n where value_strs is a list of lists correponding to the strings to set the\n arguments to in the same order.\n \"\"\"\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append( ParGridDimension(name, i_arg, value_strs, companions) )\n\n def get_n_dimensions(self):\n return len(self.dims)\n\n def get_n_points_of_dim(self, i):\n return len(self.dims[i].value_strs)\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += (self.get_n_points_of_dim(i),)\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def 
get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\" \n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n\n def iterate_indices(self, indices):\n \"\"\" iterate given indices [i1, i2, ...] by one.\n\n For easier iteration. The convention here is arbitrary, but its the\n order the arrays would be traversed in a series of nested for loops in\n the order appearin in dims (first dimension is first for loop, etc):\n\n Return False when the grid runs out of indices. Otherwise returns True.\n \"\"\"\n for iD in reversed(range(self.get_n_dimensions())):\n indices[iD] += 1\n if indices[iD] < self.get_n_points_of_dim(iD): return True\n indices[iD] = 0\n return False\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f\"Grid point at indices {indices}:\")\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f\"{name}[{i_arg}] = {value_str}\")\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]] = value_str[i]\n if 
companions is None: continue\n for ( c_name, c_i_arg, c_value_str ) in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]] = value_str[i]\n if companions is None: continue\n for ( c_name, c_i_arg, c_value_str ) in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg] = c_value_str[i_par]\n\ndef run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64, db_dict=None, verbosity=0):\n \"\"\"Extract a table of optimization values for a grid of DSP parameters \n\n The grid argument defines a list of parameters and values over which to run\n the DSP defined in dsp_config on tb_data. At each point, a scalar\n figure-of-merit is extracted\n\n Returns a N-dimensional ndarray of figure-of-merit values, where the array\n axes are in the order they appear in grid.\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed (see build_processing_chain()) and the\n list of output variables to appear in the output table for each grid point\n grid : ParGrid\n See ParGrid class for format\n fom_function : function \n When given the output lh5 table of this DSP iteration, the fom_function\n must return a scalar figure-of-merit. Should accept verbosity as a\n second keyword argument\n dtype : dtype (optional)\n The data type of the fom_function's return object. Should be np.ndarray if\n fom_function is set to None\n db_dict : dict (optional)\n DSP parameters database. 
See build_processing_chain for formatting info\n verbosity : int (optional)\n Verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n grid_values : ndarray of floats\n An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row\n of the grid argument\n \"\"\"\n\n grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)\n iii = grid.get_zero_indices()\n if verbosity > 0: print(\"Starting grid calculations...\")\n while True: \n grid.set_dsp_pars(dsp_config, iii)\n if verbosity > 1: pprint(dsp_config)\n if verbosity > 0: grid.print_data(iii)\n grid_values[tuple(iii)] = run_one_dsp(\n tb_data, dsp_config, db_dict=db_dict, fom_function=fom_function, verbosity=verbosity)\n if verbosity > 0: print('Value:', grid_values[tuple(iii)])\n if not grid.iterate_indices(iii): break\n return grid_values\n \n",
"step-ids": [
11,
14,
16,
18,
19
]
}
|
[
11,
14,
16,
18,
19
] |
#!/usr/bin/python3
"""0. How many subs"""
def number_of_subscribers(subreddit):
"""return the number of subscribers from an Reddit API"""
import requests
resInf = requests.get("https://www.reddit.com/r/{}/about.json"
.format(subreddit),
headers={"User-Agent": "My-User-Agent"},
allow_redirects=False)
if resInf.status_code >= 300:
return 0
return resInf.json().get("data").get("subscribers")
|
normal
|
{
"blob_id": "db1e3a109af2db2c8794a7c9c7dfb0c2ccee5800",
"index": 932,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef number_of_subscribers(subreddit):\n \"\"\"return the number of subscribers from an Reddit API\"\"\"\n import requests\n resInf = requests.get('https://www.reddit.com/r/{}/about.json'.format(\n subreddit), headers={'User-Agent': 'My-User-Agent'},\n allow_redirects=False)\n if resInf.status_code >= 300:\n return 0\n return resInf.json().get('data').get('subscribers')\n",
"step-3": "#!/usr/bin/python3\n\"\"\"0. How many subs\"\"\"\n\ndef number_of_subscribers(subreddit):\n \"\"\"return the number of subscribers from an Reddit API\"\"\"\n\n import requests\n\n resInf = requests.get(\"https://www.reddit.com/r/{}/about.json\"\n .format(subreddit),\n headers={\"User-Agent\": \"My-User-Agent\"},\n allow_redirects=False)\n if resInf.status_code >= 300:\n return 0\n\n return resInf.json().get(\"data\").get(\"subscribers\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -------------------------------------------
# Created by: jasper
# Date: 11/24/19
# --------------------------------------------
from os import path, mkdir
class IOHandler:
def __init__(self, directory, fName, data_instance):
"""Save the setup of a class instance or load a class instance from a saved setup
Parameters
----------
directory : str
path of the directory the files are saved to or read from
fName : str
Name of the project. File endings will be set automaticaly
data_instance : object
class instance to perform actions on
"""
self.fName = fName
self.data_instance = data_instance
self.directory = directory
def dump_data(self):
"""save the data contained in data_instance, checking whether the
directories already exist and asking whether to create them if not. """
while not path.isdir(self.directory):
print(
"# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]".format(
self.directory))
select = input()
if select == "2":
self.directory = input("Enter new directory: \n")
else:
mkdir(self.directory)
print("# Directory " + self.directory + " created")
self.fullpath = self.directory + "/" + self.fName
self.data_instance.dump_data(self.fullpath)
def dump_data_to_txt(self):
while not path.isdir(self.directory):
print(
"# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]".format(
self.directory))
select = input()
if select == "2":
self.directory = input("Enter new directory: \n")
else:
mkdir(self.directory)
print("# Directory " + self.directory + " created")
self.fullpath = self.directory + "/" + self.fName
self.data_instance.dump_to_txt(self.fullpath)
def read_data(self):
"""Read data into the specified data_instance. If the read process
hits a not existing file, it will be notified to you"""
try:
self.data_instance.read_data(self.directory + self.fName)
except FileNotFoundError as file_error:
print(
"# The file {} belonging to {} do not exist.".format(
file_error.filename, self.fName))
|
normal
|
{
"blob_id": "267276eab470b5216a2102f3e7616f7aecadcfe9",
"index": 9428,
"step-1": "<mask token>\n\n\nclass IOHandler:\n <mask token>\n\n def dump_data(self):\n \"\"\"save the data contained in data_instance, checking whether the\n directories already exist and asking whether to create them if not. \"\"\"\n while not path.isdir(self.directory):\n print(\n '# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]'\n .format(self.directory))\n select = input()\n if select == '2':\n self.directory = input('Enter new directory: \\n')\n else:\n mkdir(self.directory)\n print('# Directory ' + self.directory + ' created')\n self.fullpath = self.directory + '/' + self.fName\n self.data_instance.dump_data(self.fullpath)\n\n def dump_data_to_txt(self):\n while not path.isdir(self.directory):\n print(\n '# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]'\n .format(self.directory))\n select = input()\n if select == '2':\n self.directory = input('Enter new directory: \\n')\n else:\n mkdir(self.directory)\n print('# Directory ' + self.directory + ' created')\n self.fullpath = self.directory + '/' + self.fName\n self.data_instance.dump_to_txt(self.fullpath)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass IOHandler:\n <mask token>\n\n def dump_data(self):\n \"\"\"save the data contained in data_instance, checking whether the\n directories already exist and asking whether to create them if not. \"\"\"\n while not path.isdir(self.directory):\n print(\n '# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]'\n .format(self.directory))\n select = input()\n if select == '2':\n self.directory = input('Enter new directory: \\n')\n else:\n mkdir(self.directory)\n print('# Directory ' + self.directory + ' created')\n self.fullpath = self.directory + '/' + self.fName\n self.data_instance.dump_data(self.fullpath)\n\n def dump_data_to_txt(self):\n while not path.isdir(self.directory):\n print(\n '# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]'\n .format(self.directory))\n select = input()\n if select == '2':\n self.directory = input('Enter new directory: \\n')\n else:\n mkdir(self.directory)\n print('# Directory ' + self.directory + ' created')\n self.fullpath = self.directory + '/' + self.fName\n self.data_instance.dump_to_txt(self.fullpath)\n\n def read_data(self):\n \"\"\"Read data into the specified data_instance. If the read process\n hits a not existing file, it will be notified to you\"\"\"\n try:\n self.data_instance.read_data(self.directory + self.fName)\n except FileNotFoundError as file_error:\n print('# The file {} belonging to {} do not exist.'.format(\n file_error.filename, self.fName))\n",
"step-3": "<mask token>\n\n\nclass IOHandler:\n\n def __init__(self, directory, fName, data_instance):\n \"\"\"Save the setup of a class instance or load a class instance from a saved setup\n\n\n Parameters\n ----------\n directory : str\n path of the directory the files are saved to or read from\n fName : str\n Name of the project. File endings will be set automaticaly\n data_instance : object\n class instance to perform actions on\n \"\"\"\n self.fName = fName\n self.data_instance = data_instance\n self.directory = directory\n\n def dump_data(self):\n \"\"\"save the data contained in data_instance, checking whether the\n directories already exist and asking whether to create them if not. \"\"\"\n while not path.isdir(self.directory):\n print(\n '# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]'\n .format(self.directory))\n select = input()\n if select == '2':\n self.directory = input('Enter new directory: \\n')\n else:\n mkdir(self.directory)\n print('# Directory ' + self.directory + ' created')\n self.fullpath = self.directory + '/' + self.fName\n self.data_instance.dump_data(self.fullpath)\n\n def dump_data_to_txt(self):\n while not path.isdir(self.directory):\n print(\n '# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]'\n .format(self.directory))\n select = input()\n if select == '2':\n self.directory = input('Enter new directory: \\n')\n else:\n mkdir(self.directory)\n print('# Directory ' + self.directory + ' created')\n self.fullpath = self.directory + '/' + self.fName\n self.data_instance.dump_to_txt(self.fullpath)\n\n def read_data(self):\n \"\"\"Read data into the specified data_instance. 
If the read process\n hits a not existing file, it will be notified to you\"\"\"\n try:\n self.data_instance.read_data(self.directory + self.fName)\n except FileNotFoundError as file_error:\n print('# The file {} belonging to {} do not exist.'.format(\n file_error.filename, self.fName))\n",
"step-4": "from os import path, mkdir\n\n\nclass IOHandler:\n\n def __init__(self, directory, fName, data_instance):\n \"\"\"Save the setup of a class instance or load a class instance from a saved setup\n\n\n Parameters\n ----------\n directory : str\n path of the directory the files are saved to or read from\n fName : str\n Name of the project. File endings will be set automaticaly\n data_instance : object\n class instance to perform actions on\n \"\"\"\n self.fName = fName\n self.data_instance = data_instance\n self.directory = directory\n\n def dump_data(self):\n \"\"\"save the data contained in data_instance, checking whether the\n directories already exist and asking whether to create them if not. \"\"\"\n while not path.isdir(self.directory):\n print(\n '# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]'\n .format(self.directory))\n select = input()\n if select == '2':\n self.directory = input('Enter new directory: \\n')\n else:\n mkdir(self.directory)\n print('# Directory ' + self.directory + ' created')\n self.fullpath = self.directory + '/' + self.fName\n self.data_instance.dump_data(self.fullpath)\n\n def dump_data_to_txt(self):\n while not path.isdir(self.directory):\n print(\n '# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]'\n .format(self.directory))\n select = input()\n if select == '2':\n self.directory = input('Enter new directory: \\n')\n else:\n mkdir(self.directory)\n print('# Directory ' + self.directory + ' created')\n self.fullpath = self.directory + '/' + self.fName\n self.data_instance.dump_to_txt(self.fullpath)\n\n def read_data(self):\n \"\"\"Read data into the specified data_instance. 
If the read process\n hits a not existing file, it will be notified to you\"\"\"\n try:\n self.data_instance.read_data(self.directory + self.fName)\n except FileNotFoundError as file_error:\n print('# The file {} belonging to {} do not exist.'.format(\n file_error.filename, self.fName))\n",
"step-5": "# -------------------------------------------\n\n# Created by: jasper\n# Date: 11/24/19\n\n# --------------------------------------------\n\nfrom os import path, mkdir\n\n\nclass IOHandler:\n\n def __init__(self, directory, fName, data_instance):\n \"\"\"Save the setup of a class instance or load a class instance from a saved setup\n\n\n Parameters\n ----------\n directory : str\n path of the directory the files are saved to or read from\n fName : str\n Name of the project. File endings will be set automaticaly\n data_instance : object\n class instance to perform actions on\n \"\"\"\n self.fName = fName\n self.data_instance = data_instance\n self.directory = directory\n\n def dump_data(self):\n \"\"\"save the data contained in data_instance, checking whether the\n directories already exist and asking whether to create them if not. \"\"\"\n while not path.isdir(self.directory):\n print(\n \"# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]\".format(\n self.directory))\n select = input()\n if select == \"2\":\n self.directory = input(\"Enter new directory: \\n\")\n else:\n mkdir(self.directory)\n print(\"# Directory \" + self.directory + \" created\")\n\n self.fullpath = self.directory + \"/\" + self.fName\n\n self.data_instance.dump_data(self.fullpath)\n\n def dump_data_to_txt(self):\n while not path.isdir(self.directory):\n print(\n \"# The directory {} does not exist. Do you want to create it (1, default) or specify another? (2) [1/2]\".format(\n self.directory))\n select = input()\n if select == \"2\":\n self.directory = input(\"Enter new directory: \\n\")\n else:\n mkdir(self.directory)\n print(\"# Directory \" + self.directory + \" created\")\n\n self.fullpath = self.directory + \"/\" + self.fName\n\n self.data_instance.dump_to_txt(self.fullpath)\n\n def read_data(self):\n \"\"\"Read data into the specified data_instance. 
If the read process\n hits a not existing file, it will be notified to you\"\"\"\n\n try:\n self.data_instance.read_data(self.directory + self.fName)\n except FileNotFoundError as file_error:\n print(\n \"# The file {} belonging to {} do not exist.\".format(\n file_error.filename, self.fName))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from ctypes import *
import os
import sys
import time
import datetime
import subprocess
import RPi.GPIO as GPIO
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
#import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
import ST7735 as TFT
import pigpio
# use BCM pin define
pin_meas = 24 # 18 in BOARD
pin_black = 25 # 22 in BOARD
pin_led = 26 # 37 in BOARD
HOME_DIR = "/home/pi/QSS003_python/"
C12880_LIB = HOME_DIR + "Dual_C12880.so"
# use BCM pin define
GATE_LED_PIN1 = 4 # 7 in BOARD
GATE_LED_PIN2 = 22 # 15 in BOARD
PWM_LED_PIN1 = 18 # in pigpio
PWM_LED_PIN2 = 13 # in pigpio
PWM_FREQ = 500
DUTY_MIN = 0
DUTY_MAX = 900000 # original = 1000000
LED_CURR_MIN = 60 #mA
LED_CURR_MAX = 330 #mA
LED_DUTY_CONST = 10000/3
# use BCM pin define
AOPIN = 23 # 16 in BOARD
RSTPIN = 12 # 32 in BOARD
SPI_PORT = 1
SPI_CH = 0
SPI_SPEED = 4000000
COLOR_RED = (255,0,0)
COLOR_GREEN = (0,255,0)
COLOR_BLUE = (0,0,255)
COLOR_WHITE = (255,255,255)
COLOR_BLACK = (0,0,0)
COLOR_YELLOW = (255,255,0)
COLOR_PURPLE = (255,0, 255)
COLOR_CYAN = (0, 255,255)
TFT_SIZE = (128, 128)
LINE1Y = 15
LINE2Y = 30
LINE3Y = 45
LINE4Y = 65
LINE5Y = 80
LINE6Y = 100
SPACE1 = 15
SPACE2 = 20
time.sleep(1)
C12880 = cdll.LoadLibrary(C12880_LIB)
if len(sys.argv) < 6:
error_str = str(sys.argv[0]) + " led1_current led2_current led_stable_time int_time1 int_time2"
print(error_str)
else:
# board initialization
C12880.Setup() # init spectrometer
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(pin_meas, GPIO.IN)
GPIO.setup(pin_black, GPIO.IN)
GPIO.setup(pin_led, GPIO.OUT)
GPIO.output(pin_led, GPIO.LOW)
GPIO.setup(GATE_LED_PIN1, GPIO.OUT)
GPIO.setup(GATE_LED_PIN2, GPIO.OUT)
GPIO.output(GATE_LED_PIN1, GPIO.HIGH) #close
GPIO.output(GATE_LED_PIN2, GPIO.HIGH) #close
data1 = (c_uint * 288)() # data to store spectrum data
data2 = (c_uint * 288)()
meas = 1
black = 1
fnameindex = 0
# Display init
spi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz = SPI_SPEED)
disp = TFT.ST7735(dc = AOPIN, rst = RSTPIN, spi = spi, width = 128, height = 128)
disp.begin()
disp.clear()
img = Image.new('RGB', TFT_SIZE, COLOR_WHITE)
draw = ImageDraw.Draw(img)
font = "/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf"
fontout = ImageFont.truetype(font,11)
draw.text((0,LINE1Y), " Mode: Measure", font = fontout, fill = COLOR_BLUE)
draw.text((0,LINE2Y), " Bilirubin", font = fontout, fill = COLOR_BLUE)
draw.text((0,LINE4Y), " SiO2", font = fontout, fill = COLOR_BLUE)
disp.display(img)
led1_current = int(sys.argv[1])
led2_current = int(sys.argv[2])
led_stable_time = float(sys.argv[3])
int_time1 = int(sys.argv[4])
int_time2 = int(sys.argv[5])
if (led1_current < LED_CURR_MIN):
led1_current = LED_CURR_MIN
elif (led1_current > LED_CURR_MAX):
led1_current = LED_CURR_MAX
if (led2_current < LED_CURR_MIN):
led2_current = LED_CURR_MIN
elif (led2_current > LED_CURR_MAX):
led2_current = LED_CURR_MAX
print("led1_current = "+ str(led1_current))
print("led2_current = "+ str(led2_current))
led1_duty = (led1_current - LED_CURR_MIN)*LED_DUTY_CONST
led2_duty = (led2_current - LED_CURR_MIN)*LED_DUTY_CONST
print("led1_duty = "+ str(led1_duty))
print("led2_duty = "+ str(led2_duty))
pi = pigpio.pi()
while (1):
#wait until black or meas buttom is pressed
while (meas and black):
if GPIO.input(pin_meas) == GPIO.LOW:
meas = 0
print("meas low")
if GPIO.input(pin_black) == GPIO.LOW:
black = 0
print("black low")
GPIO.output(pin_led, GPIO.HIGH)
pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))
pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))
if (led1_duty > 0):
GPIO.output(GATE_LED_PIN1, GPIO.LOW) # open
if (led2_duty > 0):
GPIO.output(GATE_LED_PIN2, GPIO.LOW) # open
time.sleep(led_stable_time)
if (black == 0):
fname = "dual_black.txt"
else:
fname = "dual_desktop_" + str(fnameindex) + ".txt"
fname = HOME_DIR + fname
#C12880.ReadSpectrometer(int_time, data)
C12880.Read2Spectrometer(int_time1, int_time2, data1, data2)
# print the data on tft screen
draw.rectangle((0, LINE3Y, 128, LINE3Y+SPACE2), COLOR_WHITE)
draw.rectangle((0, LINE5Y, 128, LINE5Y+SPACE2), COLOR_WHITE)
draw.rectangle((0, LINE6Y, 128, LINE6Y+SPACE1), COLOR_WHITE)
fontout = ImageFont.truetype(font,16)
draw.text((0,LINE3Y)," 12.1 mg/dL", font = fontout, fill = COLOR_RED)
draw.text((0,LINE5Y)," 66%", font = fontout, fill = COLOR_RED)
fontout = ImageFont.truetype(font,10)
draw.text((0,LINE6Y),str(datetime.datetime.now()), font = fontout, fill = COLOR_BLUE)
disp.display(img)
#out = [str(line) + '\n' for line in data]
fp = open(fname, "w+")
#print(out)
#fp.writelines(out)
for i in range(0,288):
fp.write(str(data1[i]) + ", " + str(data2[i]) + ", \n")
fp.close()
if (meas == 0):
fnameindex = fnameindex + 1
pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)
pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)
GPIO.output(GATE_LED_PIN1, GPIO.HIGH) # close
GPIO.output(GATE_LED_PIN2, GPIO.HIGH) # close
# time.sleep(led_stable_time) # for LED test
meas = 1
black = 1
GPIO.output(pin_led, GPIO.LOW) #turn off measure LED
print("done")
|
normal
|
{
"blob_id": "d250cc0aafdd48cb0eb56108d9c7148153cde002",
"index": 6840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntime.sleep(1)\n<mask token>\nif len(sys.argv) < 6:\n error_str = str(sys.argv[0]\n ) + ' led1_current led2_current led_stable_time int_time1 int_time2'\n print(error_str)\nelse:\n C12880.Setup()\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(pin_meas, GPIO.IN)\n GPIO.setup(pin_black, GPIO.IN)\n GPIO.setup(pin_led, GPIO.OUT)\n GPIO.output(pin_led, GPIO.LOW)\n GPIO.setup(GATE_LED_PIN1, GPIO.OUT)\n GPIO.setup(GATE_LED_PIN2, GPIO.OUT)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n data1 = (c_uint * 288)()\n data2 = (c_uint * 288)()\n meas = 1\n black = 1\n fnameindex = 0\n spi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz=SPI_SPEED)\n disp = TFT.ST7735(dc=AOPIN, rst=RSTPIN, spi=spi, width=128, height=128)\n disp.begin()\n disp.clear()\n img = Image.new('RGB', TFT_SIZE, COLOR_WHITE)\n draw = ImageDraw.Draw(img)\n font = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'\n fontout = ImageFont.truetype(font, 11)\n draw.text((0, LINE1Y), ' Mode: Measure', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE2Y), ' Bilirubin', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE4Y), ' SiO2', font=fontout, fill=COLOR_BLUE)\n disp.display(img)\n led1_current = int(sys.argv[1])\n led2_current = int(sys.argv[2])\n led_stable_time = float(sys.argv[3])\n int_time1 = int(sys.argv[4])\n int_time2 = int(sys.argv[5])\n if led1_current < LED_CURR_MIN:\n led1_current = LED_CURR_MIN\n elif led1_current > LED_CURR_MAX:\n led1_current = LED_CURR_MAX\n if led2_current < LED_CURR_MIN:\n led2_current = LED_CURR_MIN\n elif led2_current > LED_CURR_MAX:\n led2_current = LED_CURR_MAX\n print('led1_current = ' + str(led1_current))\n print('led2_current = ' + str(led2_current))\n led1_duty = (led1_current - LED_CURR_MIN) * LED_DUTY_CONST\n led2_duty = (led2_current - LED_CURR_MIN) * LED_DUTY_CONST\n print('led1_duty = ' + str(led1_duty))\n print('led2_duty = ' + str(led2_duty))\n pi = pigpio.pi()\n while 1:\n while meas and 
black:\n if GPIO.input(pin_meas) == GPIO.LOW:\n meas = 0\n print('meas low')\n if GPIO.input(pin_black) == GPIO.LOW:\n black = 0\n print('black low')\n GPIO.output(pin_led, GPIO.HIGH)\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))\n if led1_duty > 0:\n GPIO.output(GATE_LED_PIN1, GPIO.LOW)\n if led2_duty > 0:\n GPIO.output(GATE_LED_PIN2, GPIO.LOW)\n time.sleep(led_stable_time)\n if black == 0:\n fname = 'dual_black.txt'\n else:\n fname = 'dual_desktop_' + str(fnameindex) + '.txt'\n fname = HOME_DIR + fname\n C12880.Read2Spectrometer(int_time1, int_time2, data1, data2)\n draw.rectangle((0, LINE3Y, 128, LINE3Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE5Y, 128, LINE5Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE6Y, 128, LINE6Y + SPACE1), COLOR_WHITE)\n fontout = ImageFont.truetype(font, 16)\n draw.text((0, LINE3Y), ' 12.1 mg/dL', font=fontout, fill=COLOR_RED)\n draw.text((0, LINE5Y), ' 66%', font=fontout, fill=COLOR_RED)\n fontout = ImageFont.truetype(font, 10)\n draw.text((0, LINE6Y), str(datetime.datetime.now()), font=fontout,\n fill=COLOR_BLUE)\n disp.display(img)\n fp = open(fname, 'w+')\n for i in range(0, 288):\n fp.write(str(data1[i]) + ', ' + str(data2[i]) + ', \\n')\n fp.close()\n if meas == 0:\n fnameindex = fnameindex + 1\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n meas = 1\n black = 1\n GPIO.output(pin_led, GPIO.LOW)\n print('done')\n",
"step-3": "<mask token>\npin_meas = 24\npin_black = 25\npin_led = 26\nHOME_DIR = '/home/pi/QSS003_python/'\nC12880_LIB = HOME_DIR + 'Dual_C12880.so'\nGATE_LED_PIN1 = 4\nGATE_LED_PIN2 = 22\nPWM_LED_PIN1 = 18\nPWM_LED_PIN2 = 13\nPWM_FREQ = 500\nDUTY_MIN = 0\nDUTY_MAX = 900000\nLED_CURR_MIN = 60\nLED_CURR_MAX = 330\nLED_DUTY_CONST = 10000 / 3\nAOPIN = 23\nRSTPIN = 12\nSPI_PORT = 1\nSPI_CH = 0\nSPI_SPEED = 4000000\nCOLOR_RED = 255, 0, 0\nCOLOR_GREEN = 0, 255, 0\nCOLOR_BLUE = 0, 0, 255\nCOLOR_WHITE = 255, 255, 255\nCOLOR_BLACK = 0, 0, 0\nCOLOR_YELLOW = 255, 255, 0\nCOLOR_PURPLE = 255, 0, 255\nCOLOR_CYAN = 0, 255, 255\nTFT_SIZE = 128, 128\nLINE1Y = 15\nLINE2Y = 30\nLINE3Y = 45\nLINE4Y = 65\nLINE5Y = 80\nLINE6Y = 100\nSPACE1 = 15\nSPACE2 = 20\ntime.sleep(1)\nC12880 = cdll.LoadLibrary(C12880_LIB)\nif len(sys.argv) < 6:\n error_str = str(sys.argv[0]\n ) + ' led1_current led2_current led_stable_time int_time1 int_time2'\n print(error_str)\nelse:\n C12880.Setup()\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(pin_meas, GPIO.IN)\n GPIO.setup(pin_black, GPIO.IN)\n GPIO.setup(pin_led, GPIO.OUT)\n GPIO.output(pin_led, GPIO.LOW)\n GPIO.setup(GATE_LED_PIN1, GPIO.OUT)\n GPIO.setup(GATE_LED_PIN2, GPIO.OUT)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n data1 = (c_uint * 288)()\n data2 = (c_uint * 288)()\n meas = 1\n black = 1\n fnameindex = 0\n spi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz=SPI_SPEED)\n disp = TFT.ST7735(dc=AOPIN, rst=RSTPIN, spi=spi, width=128, height=128)\n disp.begin()\n disp.clear()\n img = Image.new('RGB', TFT_SIZE, COLOR_WHITE)\n draw = ImageDraw.Draw(img)\n font = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'\n fontout = ImageFont.truetype(font, 11)\n draw.text((0, LINE1Y), ' Mode: Measure', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE2Y), ' Bilirubin', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE4Y), ' SiO2', font=fontout, fill=COLOR_BLUE)\n disp.display(img)\n led1_current = 
int(sys.argv[1])\n led2_current = int(sys.argv[2])\n led_stable_time = float(sys.argv[3])\n int_time1 = int(sys.argv[4])\n int_time2 = int(sys.argv[5])\n if led1_current < LED_CURR_MIN:\n led1_current = LED_CURR_MIN\n elif led1_current > LED_CURR_MAX:\n led1_current = LED_CURR_MAX\n if led2_current < LED_CURR_MIN:\n led2_current = LED_CURR_MIN\n elif led2_current > LED_CURR_MAX:\n led2_current = LED_CURR_MAX\n print('led1_current = ' + str(led1_current))\n print('led2_current = ' + str(led2_current))\n led1_duty = (led1_current - LED_CURR_MIN) * LED_DUTY_CONST\n led2_duty = (led2_current - LED_CURR_MIN) * LED_DUTY_CONST\n print('led1_duty = ' + str(led1_duty))\n print('led2_duty = ' + str(led2_duty))\n pi = pigpio.pi()\n while 1:\n while meas and black:\n if GPIO.input(pin_meas) == GPIO.LOW:\n meas = 0\n print('meas low')\n if GPIO.input(pin_black) == GPIO.LOW:\n black = 0\n print('black low')\n GPIO.output(pin_led, GPIO.HIGH)\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))\n if led1_duty > 0:\n GPIO.output(GATE_LED_PIN1, GPIO.LOW)\n if led2_duty > 0:\n GPIO.output(GATE_LED_PIN2, GPIO.LOW)\n time.sleep(led_stable_time)\n if black == 0:\n fname = 'dual_black.txt'\n else:\n fname = 'dual_desktop_' + str(fnameindex) + '.txt'\n fname = HOME_DIR + fname\n C12880.Read2Spectrometer(int_time1, int_time2, data1, data2)\n draw.rectangle((0, LINE3Y, 128, LINE3Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE5Y, 128, LINE5Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE6Y, 128, LINE6Y + SPACE1), COLOR_WHITE)\n fontout = ImageFont.truetype(font, 16)\n draw.text((0, LINE3Y), ' 12.1 mg/dL', font=fontout, fill=COLOR_RED)\n draw.text((0, LINE5Y), ' 66%', font=fontout, fill=COLOR_RED)\n fontout = ImageFont.truetype(font, 10)\n draw.text((0, LINE6Y), str(datetime.datetime.now()), font=fontout,\n fill=COLOR_BLUE)\n disp.display(img)\n fp = open(fname, 'w+')\n for i in range(0, 288):\n fp.write(str(data1[i]) + ', 
' + str(data2[i]) + ', \\n')\n fp.close()\n if meas == 0:\n fnameindex = fnameindex + 1\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n meas = 1\n black = 1\n GPIO.output(pin_led, GPIO.LOW)\n print('done')\n",
"step-4": "from ctypes import *\nimport os\nimport sys\nimport time\nimport datetime\nimport subprocess\nimport RPi.GPIO as GPIO\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nimport Adafruit_GPIO.SPI as SPI\nimport ST7735 as TFT\nimport pigpio\npin_meas = 24\npin_black = 25\npin_led = 26\nHOME_DIR = '/home/pi/QSS003_python/'\nC12880_LIB = HOME_DIR + 'Dual_C12880.so'\nGATE_LED_PIN1 = 4\nGATE_LED_PIN2 = 22\nPWM_LED_PIN1 = 18\nPWM_LED_PIN2 = 13\nPWM_FREQ = 500\nDUTY_MIN = 0\nDUTY_MAX = 900000\nLED_CURR_MIN = 60\nLED_CURR_MAX = 330\nLED_DUTY_CONST = 10000 / 3\nAOPIN = 23\nRSTPIN = 12\nSPI_PORT = 1\nSPI_CH = 0\nSPI_SPEED = 4000000\nCOLOR_RED = 255, 0, 0\nCOLOR_GREEN = 0, 255, 0\nCOLOR_BLUE = 0, 0, 255\nCOLOR_WHITE = 255, 255, 255\nCOLOR_BLACK = 0, 0, 0\nCOLOR_YELLOW = 255, 255, 0\nCOLOR_PURPLE = 255, 0, 255\nCOLOR_CYAN = 0, 255, 255\nTFT_SIZE = 128, 128\nLINE1Y = 15\nLINE2Y = 30\nLINE3Y = 45\nLINE4Y = 65\nLINE5Y = 80\nLINE6Y = 100\nSPACE1 = 15\nSPACE2 = 20\ntime.sleep(1)\nC12880 = cdll.LoadLibrary(C12880_LIB)\nif len(sys.argv) < 6:\n error_str = str(sys.argv[0]\n ) + ' led1_current led2_current led_stable_time int_time1 int_time2'\n print(error_str)\nelse:\n C12880.Setup()\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(pin_meas, GPIO.IN)\n GPIO.setup(pin_black, GPIO.IN)\n GPIO.setup(pin_led, GPIO.OUT)\n GPIO.output(pin_led, GPIO.LOW)\n GPIO.setup(GATE_LED_PIN1, GPIO.OUT)\n GPIO.setup(GATE_LED_PIN2, GPIO.OUT)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n data1 = (c_uint * 288)()\n data2 = (c_uint * 288)()\n meas = 1\n black = 1\n fnameindex = 0\n spi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz=SPI_SPEED)\n disp = TFT.ST7735(dc=AOPIN, rst=RSTPIN, spi=spi, width=128, height=128)\n disp.begin()\n disp.clear()\n img = Image.new('RGB', TFT_SIZE, COLOR_WHITE)\n draw = ImageDraw.Draw(img)\n font = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'\n fontout = ImageFont.truetype(font, 11)\n 
draw.text((0, LINE1Y), ' Mode: Measure', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE2Y), ' Bilirubin', font=fontout, fill=COLOR_BLUE)\n draw.text((0, LINE4Y), ' SiO2', font=fontout, fill=COLOR_BLUE)\n disp.display(img)\n led1_current = int(sys.argv[1])\n led2_current = int(sys.argv[2])\n led_stable_time = float(sys.argv[3])\n int_time1 = int(sys.argv[4])\n int_time2 = int(sys.argv[5])\n if led1_current < LED_CURR_MIN:\n led1_current = LED_CURR_MIN\n elif led1_current > LED_CURR_MAX:\n led1_current = LED_CURR_MAX\n if led2_current < LED_CURR_MIN:\n led2_current = LED_CURR_MIN\n elif led2_current > LED_CURR_MAX:\n led2_current = LED_CURR_MAX\n print('led1_current = ' + str(led1_current))\n print('led2_current = ' + str(led2_current))\n led1_duty = (led1_current - LED_CURR_MIN) * LED_DUTY_CONST\n led2_duty = (led2_current - LED_CURR_MIN) * LED_DUTY_CONST\n print('led1_duty = ' + str(led1_duty))\n print('led2_duty = ' + str(led2_duty))\n pi = pigpio.pi()\n while 1:\n while meas and black:\n if GPIO.input(pin_meas) == GPIO.LOW:\n meas = 0\n print('meas low')\n if GPIO.input(pin_black) == GPIO.LOW:\n black = 0\n print('black low')\n GPIO.output(pin_led, GPIO.HIGH)\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))\n if led1_duty > 0:\n GPIO.output(GATE_LED_PIN1, GPIO.LOW)\n if led2_duty > 0:\n GPIO.output(GATE_LED_PIN2, GPIO.LOW)\n time.sleep(led_stable_time)\n if black == 0:\n fname = 'dual_black.txt'\n else:\n fname = 'dual_desktop_' + str(fnameindex) + '.txt'\n fname = HOME_DIR + fname\n C12880.Read2Spectrometer(int_time1, int_time2, data1, data2)\n draw.rectangle((0, LINE3Y, 128, LINE3Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE5Y, 128, LINE5Y + SPACE2), COLOR_WHITE)\n draw.rectangle((0, LINE6Y, 128, LINE6Y + SPACE1), COLOR_WHITE)\n fontout = ImageFont.truetype(font, 16)\n draw.text((0, LINE3Y), ' 12.1 mg/dL', font=fontout, fill=COLOR_RED)\n draw.text((0, LINE5Y), ' 66%', font=fontout, 
fill=COLOR_RED)\n fontout = ImageFont.truetype(font, 10)\n draw.text((0, LINE6Y), str(datetime.datetime.now()), font=fontout,\n fill=COLOR_BLUE)\n disp.display(img)\n fp = open(fname, 'w+')\n for i in range(0, 288):\n fp.write(str(data1[i]) + ', ' + str(data2[i]) + ', \\n')\n fp.close()\n if meas == 0:\n fnameindex = fnameindex + 1\n pi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)\n pi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)\n GPIO.output(GATE_LED_PIN1, GPIO.HIGH)\n GPIO.output(GATE_LED_PIN2, GPIO.HIGH)\n meas = 1\n black = 1\n GPIO.output(pin_led, GPIO.LOW)\n print('done')\n",
"step-5": "from ctypes import *\nimport os\nimport sys\nimport time\nimport datetime\nimport subprocess\nimport RPi.GPIO as GPIO\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n#import Adafruit_GPIO as GPIO\nimport Adafruit_GPIO.SPI as SPI\nimport ST7735 as TFT\nimport pigpio\n\n# use BCM pin define\npin_meas = 24 \t# 18 in BOARD\npin_black = 25\t# 22 in BOARD\npin_led = 26 # 37 in BOARD\n\nHOME_DIR = \"/home/pi/QSS003_python/\"\nC12880_LIB = HOME_DIR + \"Dual_C12880.so\"\n\n# use BCM pin define\nGATE_LED_PIN1 = 4\t# 7 in BOARD\nGATE_LED_PIN2 = 22\t# 15 in BOARD\nPWM_LED_PIN1 = 18 # in pigpio\nPWM_LED_PIN2 = 13 # in pigpio\n\nPWM_FREQ = 500\nDUTY_MIN = 0\nDUTY_MAX = 900000\t# original = 1000000\nLED_CURR_MIN = 60\t#mA\nLED_CURR_MAX = 330\t#mA\nLED_DUTY_CONST = 10000/3\n\n# use BCM pin define\nAOPIN = 23\t# 16 in BOARD\nRSTPIN = 12\t# 32 in BOARD\n\nSPI_PORT = 1\nSPI_CH = 0\nSPI_SPEED = 4000000\n\nCOLOR_RED \t= (255,0,0)\nCOLOR_GREEN = (0,255,0)\nCOLOR_BLUE\t= (0,0,255)\nCOLOR_WHITE\t= (255,255,255)\nCOLOR_BLACK = (0,0,0)\nCOLOR_YELLOW = (255,255,0)\nCOLOR_PURPLE = (255,0, 255)\nCOLOR_CYAN = (0, 255,255)\nTFT_SIZE = (128, 128)\n\nLINE1Y = 15\nLINE2Y = 30\nLINE3Y = 45\nLINE4Y = 65\nLINE5Y = 80\nLINE6Y = 100\n\nSPACE1 = 15\nSPACE2 = 20\n\ntime.sleep(1)\nC12880 = cdll.LoadLibrary(C12880_LIB)\n\nif len(sys.argv) < 6:\n\terror_str = str(sys.argv[0]) + \" led1_current led2_current led_stable_time int_time1 int_time2\"\n\tprint(error_str)\nelse:\n\t# board initialization \n\tC12880.Setup() # init spectrometer\n\tGPIO.setmode(GPIO.BCM)\n\tGPIO.setwarnings(False)\n\tGPIO.setup(pin_meas, GPIO.IN)\n\tGPIO.setup(pin_black, GPIO.IN)\n\tGPIO.setup(pin_led, GPIO.OUT)\n\tGPIO.output(pin_led, GPIO.LOW)\n\tGPIO.setup(GATE_LED_PIN1, GPIO.OUT)\n\tGPIO.setup(GATE_LED_PIN2, GPIO.OUT)\n\tGPIO.output(GATE_LED_PIN1, GPIO.HIGH)\t#close\n\tGPIO.output(GATE_LED_PIN2, GPIO.HIGH)\t#close\n\n\tdata1 = (c_uint * 288)() # data to store spectrum data\n\tdata2 = (c_uint 
* 288)()\n\tmeas = 1\n\tblack = 1\n\tfnameindex = 0\n\n\t# Display init\n\tspi = SPI.SpiDev(SPI_PORT, SPI_CH, max_speed_hz = SPI_SPEED)\n\tdisp = TFT.ST7735(dc = AOPIN, rst = RSTPIN, spi = spi, width = 128, height = 128)\n\tdisp.begin()\n\tdisp.clear()\n\timg = Image.new('RGB', TFT_SIZE, COLOR_WHITE)\n\tdraw = ImageDraw.Draw(img)\n\tfont = \"/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf\"\n\tfontout = ImageFont.truetype(font,11)\n\tdraw.text((0,LINE1Y), \" Mode: Measure\", font = fontout, fill = COLOR_BLUE)\n\tdraw.text((0,LINE2Y), \" Bilirubin\", font = fontout, fill = COLOR_BLUE)\n\tdraw.text((0,LINE4Y), \" SiO2\", font = fontout, fill = COLOR_BLUE)\n\tdisp.display(img)\n\n\tled1_current = int(sys.argv[1])\n\tled2_current = int(sys.argv[2])\n\tled_stable_time = float(sys.argv[3])\n\tint_time1 = int(sys.argv[4])\n\tint_time2 = int(sys.argv[5])\n\n\tif (led1_current < LED_CURR_MIN):\n\t\tled1_current = LED_CURR_MIN\n\telif (led1_current > LED_CURR_MAX):\n\t\tled1_current = LED_CURR_MAX\n\n\tif (led2_current < LED_CURR_MIN):\n\t\tled2_current = LED_CURR_MIN\n\telif (led2_current > LED_CURR_MAX):\n\t\tled2_current = LED_CURR_MAX\n\n\tprint(\"led1_current = \"+ str(led1_current))\n\tprint(\"led2_current = \"+ str(led2_current))\n\n\tled1_duty = (led1_current - LED_CURR_MIN)*LED_DUTY_CONST\n\tled2_duty = (led2_current - LED_CURR_MIN)*LED_DUTY_CONST\n\n\tprint(\"led1_duty = \"+ str(led1_duty))\n\tprint(\"led2_duty = \"+ str(led2_duty))\n\n\tpi = pigpio.pi()\n\n\twhile (1):\n\t\t#wait until black or meas buttom is pressed\n\t\twhile (meas and black):\n\t\t\tif GPIO.input(pin_meas) == GPIO.LOW:\n\t\t\t\tmeas = 0\n\t\t\t\tprint(\"meas low\")\n\t\t\tif GPIO.input(pin_black) == GPIO.LOW:\n\t\t\t\tblack = 0\n\t\t\t\tprint(\"black low\")\n\n\t\tGPIO.output(pin_led, GPIO.HIGH)\n\t\tpi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, int(led1_duty))\n\t\tpi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, int(led2_duty))\n\t\tif (led1_duty > 0):\n\t\t\tGPIO.output(GATE_LED_PIN1, GPIO.LOW)\t# 
open\n\t\tif (led2_duty > 0):\n\t\t\tGPIO.output(GATE_LED_PIN2, GPIO.LOW)\t# open\n\n\t\ttime.sleep(led_stable_time)\n\n\t\tif (black == 0):\n\t\t\tfname = \"dual_black.txt\"\n\t\telse:\n\t\t\tfname = \"dual_desktop_\" + str(fnameindex) + \".txt\"\n\t\tfname = HOME_DIR + fname\n\n\t\t#C12880.ReadSpectrometer(int_time, data)\n\t\tC12880.Read2Spectrometer(int_time1, int_time2, data1, data2)\n\n\t\t# print the data on tft screen \n\t\tdraw.rectangle((0, LINE3Y, 128, LINE3Y+SPACE2), COLOR_WHITE)\n\t\tdraw.rectangle((0, LINE5Y, 128, LINE5Y+SPACE2), COLOR_WHITE)\n\t\tdraw.rectangle((0, LINE6Y, 128, LINE6Y+SPACE1), COLOR_WHITE)\n\t\tfontout = ImageFont.truetype(font,16)\n\t\tdraw.text((0,LINE3Y),\" 12.1 mg/dL\", font = fontout, fill = COLOR_RED)\n\t\tdraw.text((0,LINE5Y),\" 66%\", font = fontout, fill = COLOR_RED)\n\t\tfontout = ImageFont.truetype(font,10)\n\t\tdraw.text((0,LINE6Y),str(datetime.datetime.now()), font = fontout, fill = COLOR_BLUE)\n\t\tdisp.display(img)\n\n\t\t#out = [str(line) + '\\n' for line in data]\n\t\tfp = open(fname, \"w+\")\n\t\t#print(out)\n\t\t#fp.writelines(out)\n\t\tfor i in range(0,288):\n\t\t\tfp.write(str(data1[i]) + \", \" + str(data2[i]) + \", \\n\")\n\t\tfp.close()\n\n\t\tif (meas == 0):\n\t\t\tfnameindex = fnameindex + 1\n\n\t\tpi.hardware_PWM(PWM_LED_PIN1, PWM_FREQ, 0)\n\t\tpi.hardware_PWM(PWM_LED_PIN2, PWM_FREQ, 0)\n\t\tGPIO.output(GATE_LED_PIN1, GPIO.HIGH) # close\n\t\tGPIO.output(GATE_LED_PIN2, GPIO.HIGH) # close\n\n\t\t# time.sleep(led_stable_time)\t# for LED test\n\n\t\tmeas = 1\n\t\tblack = 1\n\n\t\tGPIO.output(pin_led, GPIO.LOW) #turn off measure LED\n\t\tprint(\"done\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2008 Rok Garbas <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
__docformat__ = "reStructuredText"
import z3c.form
import zope.schema
import zope.interface
import zope.component
from widget_date import DateWidget
from interfaces import IMonthYearWidget
class MonthYearWidget(DateWidget):
    """ Month and year widget """
    # Class advice (Python 2 zope.interface idiom): declares that this widget
    # provides *only* IMonthYearWidget, replacing any interface declarations
    # inherited from DateWidget.  Must remain a bare call in the class body.
    zope.interface.implementsOnly(IMonthYearWidget)
    # CSS class emitted on the rendered widget element.
    klass = u'monthyear-widget'
    # Default widget value triple; the trailing 1 pins the day component
    # since only month and year are collected from the user.
    # NOTE(review): assumed ordering follows DateWidget's value tuple —
    # confirm against widget_date.DateWidget.
    value = ('', '', 1)
@zope.component.adapter(zope.schema.interfaces.IField, z3c.form.interfaces.IFormLayer)
@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)
def MonthYearFieldWidget(field, request):
    """Adapter factory that binds a schema field to a MonthYearWidget.

    Registered for (IField, IFormLayer); the widget is wrapped with
    z3c.form's FieldWidget helper so the form machinery can use it.
    """
    widget = MonthYearWidget(request)
    return z3c.form.widget.FieldWidget(field, widget)
|
normal
|
{
"blob_id": "d0f9dd0a06023dd844b0bf70dff360f6bb46c152",
"index": 4412,
"step-1": "<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-3": "__docformat__ = 'reStructuredText'\n<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-4": "__docformat__ = 'reStructuredText'\nimport z3c.form\nimport zope.schema\nimport zope.interface\nimport zope.component\nfrom widget_date import DateWidget\nfrom interfaces import IMonthYearWidget\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-5": "#-*- coding: utf-8 -*- \n\n#############################################################################\n# #\n# Copyright (c) 2008 Rok Garbas <[email protected]> #\n# #\n# This program is free software; you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation; either version 3 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see <http://www.gnu.org/licenses/>. #\n# #\n#############################################################################\n__docformat__ = \"reStructuredText\"\n\nimport z3c.form\nimport zope.schema\nimport zope.interface\nimport zope.component\nfrom widget_date import DateWidget\nfrom interfaces import IMonthYearWidget\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n\n zope.interface.implementsOnly(IMonthYearWidget)\n\n klass = u'monthyear-widget'\n value = ('', '', 1)\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import wx
from six import print_
import os
FONTSIZE = 10
class TextDocPrintout(wx.Printout):
    """
    A printout class that is able to print simple text documents.
    Does not handle page numbers or titles, and it assumes that no
    lines are longer than what will fit within the page width.  Those
    features are left as an exercise for the reader. ;-)
    """
    def __init__(self, text, title, margins):
        # text:    the whole document as one string (split into lines here)
        # title:   print-job title handed to wx.Printout
        # margins: (topLeft, bottomRight) wx.Points, in millimetres
        wx.Printout.__init__(self, title)
        self.lines = text.split('\n')
        self.margins = margins

    def HasPage(self, page):
        """Return True while *page* is within the computed page count."""
        return page <= self.numPages

    def GetPageInfo(self):
        """Return (minPage, maxPage, pageFrom, pageTo) for the framework."""
        return (1, self.numPages, 1, self.numPages)

    def CalculateScale(self, dc):
        """Scale *dc* so the printout is roughly the same size as on screen."""
        # Scale the DC such that the printout is roughly the same as
        # the screen scaling.
        ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()
        ppiScreenX, ppiScreenY = self.GetPPIScreen()
        logScale = float(ppiPrinterX)/float(ppiScreenX)

        # Now adjust if the real page size is reduced (such as when
        # drawing on a scaled wx.MemoryDC in the Print Preview.)  If
        # page width == DC width then nothing changes, otherwise we
        # scale down for the DC.
        pw, ph = self.GetPageSizePixels()
        dw, dh = dc.GetSize()
        scale = logScale * float(dw)/float(pw)

        # Set the DC's scale.
        dc.SetUserScale(scale, scale)

        # Find the logical units per millimeter (for calculating the
        # margins)
        self.logUnitsMM = float(ppiPrinterX)/(logScale*25.4)

    def CalculateLayout(self, dc):
        """Compute the margin box (x1, y1)-(x2, y2) and lines per page."""
        # Determine the position of the margins and the
        # page/line height
        topLeft, bottomRight = self.margins
        dw, dh = dc.GetSize()
        self.x1 = topLeft.x * self.logUnitsMM
        self.y1 = topLeft.y * self.logUnitsMM
        self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM
        self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM
        # use a 1mm buffer around the inside of the box, and a few
        # pixels between each line
        self.pageHeight = self.y2 - self.y1 - 2*self.logUnitsMM
        font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,
                       wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
        dc.SetFont(font)
        self.lineHeight = dc.GetCharHeight()
        self.linesPerPage = int(self.pageHeight/self.lineHeight)

    def OnPreparePrinting(self):
        """Called once before printing starts; computes self.numPages."""
        # calculate the number of pages
        dc = self.GetDC()
        self.CalculateScale(dc)
        self.CalculateLayout(dc)
        # BUGFIX: use floor division.  Under Python 3 the original "/"
        # produced a float (e.g. numPages == 3.5), breaking the integer
        # page counting relied on by HasPage/GetPageInfo.  "//" matches
        # the Python 2 integer-division behavior the code was written for.
        self.numPages = len(self.lines) // self.linesPerPage
        if len(self.lines) % self.linesPerPage != 0:
            self.numPages += 1

    def OnPrintPage(self, page):
        """Draw one page: a margin outline plus this page's text lines."""
        dc = self.GetDC()
        self.CalculateScale(dc)
        self.CalculateLayout(dc)

        # draw a page outline at the margin points
        dc.SetPen(wx.Pen("black", 0))
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))
        dc.DrawRectangle(r)
        dc.SetClippingRegion(r)

        # Draw the text lines for this page
        line = (page-1) * self.linesPerPage
        x = self.x1 + self.logUnitsMM
        y = self.y1 + self.logUnitsMM
        while line < (page * self.linesPerPage):
            dc.DrawText(self.lines[line], x, y)
            y += self.lineHeight
            line += 1
            if line >= len(self.lines):
                break
        return True
class PrintFrameworkSample(wx.Frame):
    """Demo frame for the wx printing framework.

    Shows an editable text document and wires up menu commands for page
    setup, on-screen print preview, and printing via TextDocPrintout.
    """
    def __init__(self):
        wx.Frame.__init__(self, None, size=(640, 480),
                          title="Print Framework Sample")
        self.CreateStatusBar()
        # A text widget to display the doc and let it be edited
        self.tc = wx.TextCtrl(self, -1, "",
                              style=wx.TE_MULTILINE|wx.TE_DONTWRAP)
        self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,
                                wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
        # Load the sample document that lives next to this script.
        filename = os.path.join(os.path.dirname(__file__), "sample-text.txt")
        with open(filename) as fid:
            self.tc.SetValue(fid.read())
        self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)
        wx.CallAfter(self.tc.SetInsertionPoint, 0)
        # Create the menu and menubar
        menu = wx.Menu()
        item = menu.Append(-1, "Page Setup...\tF5",
                           "Set up page margins and etc.")
        self.Bind(wx.EVT_MENU, self.OnPageSetup, item)
        item = menu.Append(-1, "Print Preview...\tF6",
                           "View the printout on-screen")
        self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)
        item = menu.Append(-1, "Print...\tF7", "Print the document")
        self.Bind(wx.EVT_MENU, self.OnPrint, item)
        menu.AppendSeparator()
        ## item = menu.Append(-1, "Test other stuff...\tF9", "")
        ## self.Bind(wx.EVT_MENU, self.OnPrintTest, item)
        ## menu.AppendSeparator()
        item = menu.Append(wx.ID_ABOUT, "About", "About this application")
        self.Bind(wx.EVT_MENU, self.OnAbout, item)
        item = menu.Append(wx.ID_EXIT, "E&xit\tCtrl-Q", "Close this application")
        self.Bind(wx.EVT_MENU, self.OnExit, item)
        menubar = wx.MenuBar()
        menubar.Append(menu, "&File")
        self.SetMenuBar(menubar)
        # initialize the print data and set some default values
        self.pdata = wx.PrintData()
        self.pdata.SetPaperId(wx.PAPER_LETTER)
        self.pdata.SetOrientation(wx.PORTRAIT)
        # Margins kept as (topLeft, bottomRight) wx.Points, in millimetres.
        self.margins = (wx.Point(15,15), wx.Point(15,15))

    def OnExit(self, evt):
        """Handle File->Exit: close the frame."""
        self.Close()

    def OnAbout(self, evt):
        """Show a simple about box with the wxPython version."""
        wx.MessageBox('Print framework sample application\n'
                      '\n'
                      'Using wxPython %s' % wx.version(),
                      'About')

    def OnClearSelection(self, evt):
        """On focus, collapse any text selection by re-setting the caret."""
        evt.Skip()
        wx.CallAfter(self.tc.SetInsertionPoint,
                     self.tc.GetInsertionPoint())

    def OnPageSetup(self, evt):
        """Show the page-setup dialog; store print data and margins on OK."""
        data = wx.PageSetupDialogData()
        data.SetPrintData(self.pdata)
        data.SetDefaultMinMargins(True)
        data.SetMarginTopLeft(self.margins[0])
        data.SetMarginBottomRight(self.margins[1])
        dlg = wx.PageSetupDialog(self, data)
        if dlg.ShowModal() == wx.ID_OK:
            data = dlg.GetPageSetupData()
            self.pdata = wx.PrintData(data.GetPrintData()) # force a copy
            self.pdata.SetPaperId(data.GetPaperId())
            #print_("paperID %r, paperSize %r" % (self.pdata.GetPaperId(), self.pdata.GetPaperSize()))
            self.margins = (data.GetMarginTopLeft(),
                            data.GetMarginBottomRight())
        dlg.Destroy()

    def OnPrintPreview(self, evt):
        """Open an on-screen print preview of the current document."""
        data = wx.PrintDialogData(self.pdata)
        text = self.tc.GetValue()
        # wx.PrintPreview wants two printouts: one to render the preview
        # and one to print from within the preview frame.
        printout1 = TextDocPrintout(text, "title", self.margins)
        printout2 = TextDocPrintout(text, "title", self.margins)
        preview = wx.PrintPreview(printout1, printout2, data)
        if not preview:
            wx.MessageBox("Unable to create PrintPreview!", "Error")
        else:
            # create the preview frame such that it overlays the app frame
            frame = wx.PreviewFrame(preview, self, "Print Preview",
                                    pos=self.GetPosition(),
                                    size=self.GetSize())
            frame.Initialize()
            frame.Show()

    def OnPrint(self, evt):
        """Print the document, optionally showing the printer dialog first."""
        data = wx.PrintDialogData(self.pdata)
        printer = wx.Printer(data)
        text = self.tc.GetValue()
        printout = TextDocPrintout(text, "title", self.margins)
        useSetupDialog = True
        # Print() returns False both on error and on user cancel; only the
        # PRINTER_ERROR case is reported to the user.
        if not printer.Print(self, printout, useSetupDialog) \
           and printer.GetLastError() == wx.PRINTER_ERROR:
            wx.MessageBox(
                "There was a problem printing.\n"
                "Perhaps your current printer is not set correctly?",
                "Printing Error", wx.OK)
        else:
            data = printer.GetPrintDialogData()
            self.pdata = wx.PrintData(data.GetPrintData()) # force a copy
        printout.Destroy()

    def OnPrintTest(self, evt):
        """Debug helper: dump wx.PrintDialog values to stdout.

        Currently unreachable from the UI — its menu item is commented out
        in __init__.
        """
        data = wx.PrintDialogData(self.pdata)
        dlg = wx.PrintDialog(self, data)
        if dlg.ShowModal() == wx.ID_OK:
            data = dlg.GetPrintDialogData()
            print_()
            print_("GetFromPage:", data.GetFromPage())
            print_("GetToPage:", data.GetToPage())
            print_("GetMinPage:", data.GetMinPage())
            print_("GetMaxPage:", data.GetMaxPage())
            print_("GetNoCopies:", data.GetNoCopies())
            print_("GetAllPages:", data.GetAllPages())
            print_("GetSelection:", data.GetSelection())
            print_("GetCollate:", data.GetCollate())
            print_("GetPrintToFile:", data.GetPrintToFile())
            self.pdata = wx.PrintData(data.GetPrintData())
            print_()
            print_("GetPrinterName:", self.pdata.GetPrinterName())
        dlg.Destroy()
def main():
    """Create the wx application, show the sample frame, and run the
    event loop."""
    app = wx.App()
    frm = PrintFrameworkSample()
    frm.Show()
    app.MainLoop()


# Guard the entry point so importing this module (e.g. to reuse
# TextDocPrintout) no longer launches the GUI as a side effect; running
# the file as a script behaves exactly as before.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "2790bd80949bafe4e98ab9aca9cf80a6a0f31490",
"index": 6200,
"step-1": "<mask token>\n\n\nclass TextDocPrintout(wx.Printout):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PrintFrameworkSample(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, size=(640, 480), title=\n 'Print Framework Sample')\n self.CreateStatusBar()\n self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.\n TE_DONTWRAP)\n self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n menu = wx.Menu()\n item = menu.Append(-1, 'Page Setup...\\tF5',\n 'Set up page margins and etc.')\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, 'Print Preview...\\tF6',\n 'View the printout on-screen')\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, 'Print...\\tF7', 'Print the document')\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl-Q',\n 'Close this application')\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n menubar = wx.MenuBar()\n menubar.Append(menu, '&File')\n self.SetMenuBar(menubar)\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = wx.Point(15, 15), wx.Point(15, 15)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnAbout(self, evt):\n wx.MessageBox(\n 'Print framework sample application\\n\\nUsing wxPython %s' % wx.\n version(), 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())\n\n def 
OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData())\n self.pdata.SetPaperId(data.GetPaperId())\n self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()\n dlg.Destroy()\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, 'title', self.margins)\n printout2 = TextDocPrintout(text, 'title', self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox('Unable to create PrintPreview!', 'Error')\n else:\n frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=\n self.GetPosition(), size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, 'title', self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog\n ) and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"\"\"There was a problem printing.\nPerhaps your current printer is not set correctly?\"\"\"\n , 'Printing Error', wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData())\n printout.Destroy()\n\n def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_('GetFromPage:', data.GetFromPage())\n print_('GetToPage:', data.GetToPage())\n print_('GetMinPage:', data.GetMinPage())\n print_('GetMaxPage:', data.GetMaxPage())\n print_('GetNoCopies:', data.GetNoCopies())\n print_('GetAllPages:', 
data.GetAllPages())\n print_('GetSelection:', data.GetSelection())\n print_('GetCollate:', data.GetCollate())\n print_('GetPrintToFile:', data.GetPrintToFile())\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_('GetPrinterName:', self.pdata.GetPrinterName())\n dlg.Destroy()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TextDocPrintout(wx.Printout):\n <mask token>\n <mask token>\n <mask token>\n\n def GetPageInfo(self):\n return 1, self.numPages, 1, self.numPages\n\n def CalculateScale(self, dc):\n ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()\n ppiScreenX, ppiScreenY = self.GetPPIScreen()\n logScale = float(ppiPrinterX) / float(ppiScreenX)\n pw, ph = self.GetPageSizePixels()\n dw, dh = dc.GetSize()\n scale = logScale * float(dw) / float(pw)\n dc.SetUserScale(scale, scale)\n self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)\n\n def CalculateLayout(self, dc):\n topLeft, bottomRight = self.margins\n dw, dh = dc.GetSize()\n self.x1 = topLeft.x * self.logUnitsMM\n self.y1 = topLeft.y * self.logUnitsMM\n self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM\n self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM\n self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM\n font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n dc.SetFont(font)\n self.lineHeight = dc.GetCharHeight()\n self.linesPerPage = int(self.pageHeight / self.lineHeight)\n <mask token>\n\n def OnPrintPage(self, page):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n dc.SetPen(wx.Pen('black', 0))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))\n dc.DrawRectangle(r)\n dc.SetClippingRegion(r)\n line = (page - 1) * self.linesPerPage\n x = self.x1 + self.logUnitsMM\n y = self.y1 + self.logUnitsMM\n while line < page * self.linesPerPage:\n dc.DrawText(self.lines[line], x, y)\n y += self.lineHeight\n line += 1\n if line >= len(self.lines):\n break\n return True\n\n\nclass PrintFrameworkSample(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, size=(640, 480), title=\n 'Print Framework Sample')\n self.CreateStatusBar()\n self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.\n TE_DONTWRAP)\n 
self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n menu = wx.Menu()\n item = menu.Append(-1, 'Page Setup...\\tF5',\n 'Set up page margins and etc.')\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, 'Print Preview...\\tF6',\n 'View the printout on-screen')\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, 'Print...\\tF7', 'Print the document')\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl-Q',\n 'Close this application')\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n menubar = wx.MenuBar()\n menubar.Append(menu, '&File')\n self.SetMenuBar(menubar)\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = wx.Point(15, 15), wx.Point(15, 15)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnAbout(self, evt):\n wx.MessageBox(\n 'Print framework sample application\\n\\nUsing wxPython %s' % wx.\n version(), 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())\n\n def OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData())\n self.pdata.SetPaperId(data.GetPaperId())\n self.margins = data.GetMarginTopLeft(), 
data.GetMarginBottomRight()\n dlg.Destroy()\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, 'title', self.margins)\n printout2 = TextDocPrintout(text, 'title', self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox('Unable to create PrintPreview!', 'Error')\n else:\n frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=\n self.GetPosition(), size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, 'title', self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog\n ) and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"\"\"There was a problem printing.\nPerhaps your current printer is not set correctly?\"\"\"\n , 'Printing Error', wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData())\n printout.Destroy()\n\n def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_('GetFromPage:', data.GetFromPage())\n print_('GetToPage:', data.GetToPage())\n print_('GetMinPage:', data.GetMinPage())\n print_('GetMaxPage:', data.GetMaxPage())\n print_('GetNoCopies:', data.GetNoCopies())\n print_('GetAllPages:', data.GetAllPages())\n print_('GetSelection:', data.GetSelection())\n print_('GetCollate:', data.GetCollate())\n print_('GetPrintToFile:', data.GetPrintToFile())\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_('GetPrinterName:', self.pdata.GetPrinterName())\n dlg.Destroy()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TextDocPrintout(wx.Printout):\n <mask token>\n\n def __init__(self, text, title, margins):\n wx.Printout.__init__(self, title)\n self.lines = text.split('\\n')\n self.margins = margins\n\n def HasPage(self, page):\n return page <= self.numPages\n\n def GetPageInfo(self):\n return 1, self.numPages, 1, self.numPages\n\n def CalculateScale(self, dc):\n ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()\n ppiScreenX, ppiScreenY = self.GetPPIScreen()\n logScale = float(ppiPrinterX) / float(ppiScreenX)\n pw, ph = self.GetPageSizePixels()\n dw, dh = dc.GetSize()\n scale = logScale * float(dw) / float(pw)\n dc.SetUserScale(scale, scale)\n self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)\n\n def CalculateLayout(self, dc):\n topLeft, bottomRight = self.margins\n dw, dh = dc.GetSize()\n self.x1 = topLeft.x * self.logUnitsMM\n self.y1 = topLeft.y * self.logUnitsMM\n self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM\n self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM\n self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM\n font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n dc.SetFont(font)\n self.lineHeight = dc.GetCharHeight()\n self.linesPerPage = int(self.pageHeight / self.lineHeight)\n <mask token>\n\n def OnPrintPage(self, page):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n dc.SetPen(wx.Pen('black', 0))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))\n dc.DrawRectangle(r)\n dc.SetClippingRegion(r)\n line = (page - 1) * self.linesPerPage\n x = self.x1 + self.logUnitsMM\n y = self.y1 + self.logUnitsMM\n while line < page * self.linesPerPage:\n dc.DrawText(self.lines[line], x, y)\n y += self.lineHeight\n line += 1\n if line >= len(self.lines):\n break\n return True\n\n\nclass PrintFrameworkSample(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, 
None, size=(640, 480), title=\n 'Print Framework Sample')\n self.CreateStatusBar()\n self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.\n TE_DONTWRAP)\n self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n menu = wx.Menu()\n item = menu.Append(-1, 'Page Setup...\\tF5',\n 'Set up page margins and etc.')\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, 'Print Preview...\\tF6',\n 'View the printout on-screen')\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, 'Print...\\tF7', 'Print the document')\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl-Q',\n 'Close this application')\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n menubar = wx.MenuBar()\n menubar.Append(menu, '&File')\n self.SetMenuBar(menubar)\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = wx.Point(15, 15), wx.Point(15, 15)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnAbout(self, evt):\n wx.MessageBox(\n 'Print framework sample application\\n\\nUsing wxPython %s' % wx.\n version(), 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())\n\n def OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n 
data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData())\n self.pdata.SetPaperId(data.GetPaperId())\n self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()\n dlg.Destroy()\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, 'title', self.margins)\n printout2 = TextDocPrintout(text, 'title', self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox('Unable to create PrintPreview!', 'Error')\n else:\n frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=\n self.GetPosition(), size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, 'title', self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog\n ) and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"\"\"There was a problem printing.\nPerhaps your current printer is not set correctly?\"\"\"\n , 'Printing Error', wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData())\n printout.Destroy()\n\n def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_('GetFromPage:', data.GetFromPage())\n print_('GetToPage:', data.GetToPage())\n print_('GetMinPage:', data.GetMinPage())\n print_('GetMaxPage:', data.GetMaxPage())\n print_('GetNoCopies:', data.GetNoCopies())\n print_('GetAllPages:', data.GetAllPages())\n print_('GetSelection:', data.GetSelection())\n print_('GetCollate:', data.GetCollate())\n print_('GetPrintToFile:', data.GetPrintToFile())\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_('GetPrinterName:', self.pdata.GetPrinterName())\n 
dlg.Destroy()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TextDocPrintout(wx.Printout):\n \"\"\"\n A printout class that is able to print simple text documents.\n Does not handle page numbers or titles, and it assumes that no\n lines are longer than what will fit within the page width. Those\n features are left as an exercise for the reader. ;-)\n \"\"\"\n\n def __init__(self, text, title, margins):\n wx.Printout.__init__(self, title)\n self.lines = text.split('\\n')\n self.margins = margins\n\n def HasPage(self, page):\n return page <= self.numPages\n\n def GetPageInfo(self):\n return 1, self.numPages, 1, self.numPages\n\n def CalculateScale(self, dc):\n ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()\n ppiScreenX, ppiScreenY = self.GetPPIScreen()\n logScale = float(ppiPrinterX) / float(ppiScreenX)\n pw, ph = self.GetPageSizePixels()\n dw, dh = dc.GetSize()\n scale = logScale * float(dw) / float(pw)\n dc.SetUserScale(scale, scale)\n self.logUnitsMM = float(ppiPrinterX) / (logScale * 25.4)\n\n def CalculateLayout(self, dc):\n topLeft, bottomRight = self.margins\n dw, dh = dc.GetSize()\n self.x1 = topLeft.x * self.logUnitsMM\n self.y1 = topLeft.y * self.logUnitsMM\n self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM\n self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM\n self.pageHeight = self.y2 - self.y1 - 2 * self.logUnitsMM\n font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n dc.SetFont(font)\n self.lineHeight = dc.GetCharHeight()\n self.linesPerPage = int(self.pageHeight / self.lineHeight)\n\n def OnPreparePrinting(self):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n self.numPages = len(self.lines) / self.linesPerPage\n if len(self.lines) % self.linesPerPage != 0:\n self.numPages += 1\n\n def OnPrintPage(self, page):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n dc.SetPen(wx.Pen('black', 0))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n r = 
wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))\n dc.DrawRectangle(r)\n dc.SetClippingRegion(r)\n line = (page - 1) * self.linesPerPage\n x = self.x1 + self.logUnitsMM\n y = self.y1 + self.logUnitsMM\n while line < page * self.linesPerPage:\n dc.DrawText(self.lines[line], x, y)\n y += self.lineHeight\n line += 1\n if line >= len(self.lines):\n break\n return True\n\n\nclass PrintFrameworkSample(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, size=(640, 480), title=\n 'Print Framework Sample')\n self.CreateStatusBar()\n self.tc = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE | wx.\n TE_DONTWRAP)\n self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE, wx.\n FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), 'sample-text.txt')\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n menu = wx.Menu()\n item = menu.Append(-1, 'Page Setup...\\tF5',\n 'Set up page margins and etc.')\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, 'Print Preview...\\tF6',\n 'View the printout on-screen')\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, 'Print...\\tF7', 'Print the document')\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n item = menu.Append(wx.ID_ABOUT, 'About', 'About this application')\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, 'E&xit\\tCtrl-Q',\n 'Close this application')\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n menubar = wx.MenuBar()\n menubar.Append(menu, '&File')\n self.SetMenuBar(menubar)\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = wx.Point(15, 15), wx.Point(15, 15)\n\n def OnExit(self, evt):\n self.Close()\n\n def OnAbout(self, evt):\n wx.MessageBox(\n 'Print framework sample 
application\\n\\nUsing wxPython %s' % wx.\n version(), 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint, self.tc.GetInsertionPoint())\n\n def OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData())\n self.pdata.SetPaperId(data.GetPaperId())\n self.margins = data.GetMarginTopLeft(), data.GetMarginBottomRight()\n dlg.Destroy()\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, 'title', self.margins)\n printout2 = TextDocPrintout(text, 'title', self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox('Unable to create PrintPreview!', 'Error')\n else:\n frame = wx.PreviewFrame(preview, self, 'Print Preview', pos=\n self.GetPosition(), size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, 'title', self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog\n ) and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"\"\"There was a problem printing.\nPerhaps your current printer is not set correctly?\"\"\"\n , 'Printing Error', wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData())\n printout.Destroy()\n\n def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_('GetFromPage:', data.GetFromPage())\n 
print_('GetToPage:', data.GetToPage())\n print_('GetMinPage:', data.GetMinPage())\n print_('GetMaxPage:', data.GetMaxPage())\n print_('GetNoCopies:', data.GetNoCopies())\n print_('GetAllPages:', data.GetAllPages())\n print_('GetSelection:', data.GetSelection())\n print_('GetCollate:', data.GetCollate())\n print_('GetPrintToFile:', data.GetPrintToFile())\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_('GetPrinterName:', self.pdata.GetPrinterName())\n dlg.Destroy()\n\n\n<mask token>\nfrm.Show()\napp.MainLoop()\n",
"step-5": "import wx\nfrom six import print_\nimport os\n\nFONTSIZE = 10\n\nclass TextDocPrintout(wx.Printout):\n \"\"\"\n A printout class that is able to print simple text documents.\n Does not handle page numbers or titles, and it assumes that no\n lines are longer than what will fit within the page width. Those\n features are left as an exercise for the reader. ;-)\n \"\"\"\n def __init__(self, text, title, margins):\n wx.Printout.__init__(self, title)\n self.lines = text.split('\\n')\n self.margins = margins\n\n\n def HasPage(self, page):\n return page <= self.numPages\n\n def GetPageInfo(self):\n return (1, self.numPages, 1, self.numPages)\n\n\n def CalculateScale(self, dc):\n # Scale the DC such that the printout is roughly the same as\n # the screen scaling.\n ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()\n ppiScreenX, ppiScreenY = self.GetPPIScreen()\n logScale = float(ppiPrinterX)/float(ppiScreenX)\n\n # Now adjust if the real page size is reduced (such as when\n # drawing on a scaled wx.MemoryDC in the Print Preview.) 
If\n # page width == DC width then nothing changes, otherwise we\n # scale down for the DC.\n pw, ph = self.GetPageSizePixels()\n dw, dh = dc.GetSize()\n scale = logScale * float(dw)/float(pw)\n\n # Set the DC's scale.\n dc.SetUserScale(scale, scale)\n\n # Find the logical units per millimeter (for calculating the\n # margins)\n self.logUnitsMM = float(ppiPrinterX)/(logScale*25.4)\n\n\n def CalculateLayout(self, dc):\n # Determine the position of the margins and the\n # page/line height\n topLeft, bottomRight = self.margins\n dw, dh = dc.GetSize()\n self.x1 = topLeft.x * self.logUnitsMM\n self.y1 = topLeft.y * self.logUnitsMM\n self.x2 = dc.DeviceToLogicalXRel(dw) - bottomRight.x * self.logUnitsMM\n self.y2 = dc.DeviceToLogicalYRel(dh) - bottomRight.y * self.logUnitsMM\n\n # use a 1mm buffer around the inside of the box, and a few\n # pixels between each line\n self.pageHeight = self.y2 - self.y1 - 2*self.logUnitsMM\n font = wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,\n wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)\n dc.SetFont(font)\n self.lineHeight = dc.GetCharHeight()\n self.linesPerPage = int(self.pageHeight/self.lineHeight)\n\n\n def OnPreparePrinting(self):\n # calculate the number of pages\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n self.numPages = len(self.lines) / self.linesPerPage\n if len(self.lines) % self.linesPerPage != 0:\n self.numPages += 1\n\n\n def OnPrintPage(self, page):\n dc = self.GetDC()\n self.CalculateScale(dc)\n self.CalculateLayout(dc)\n\n # draw a page outline at the margin points\n dc.SetPen(wx.Pen(\"black\", 0))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n r = wx.Rect(wx.Point(self.x1, self.y1), wx.Point(self.x2, self.y2))\n dc.DrawRectangle(r)\n dc.SetClippingRegion(r)\n\n # Draw the text lines for this page\n line = (page-1) * self.linesPerPage\n x = self.x1 + self.logUnitsMM\n y = self.y1 + self.logUnitsMM\n while line < (page * self.linesPerPage):\n dc.DrawText(self.lines[line], x, y)\n y += self.lineHeight\n 
line += 1\n if line >= len(self.lines):\n break\n return True\n\n\n\nclass PrintFrameworkSample(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, size=(640, 480),\n title=\"Print Framework Sample\")\n self.CreateStatusBar()\n\n # A text widget to display the doc and let it be edited\n self.tc = wx.TextCtrl(self, -1, \"\",\n style=wx.TE_MULTILINE|wx.TE_DONTWRAP)\n self.tc.SetFont(wx.Font(FONTSIZE, wx.FONTFAMILY_TELETYPE,\n wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))\n filename = os.path.join(os.path.dirname(__file__), \"sample-text.txt\")\n with open(filename) as fid:\n self.tc.SetValue(fid.read())\n self.tc.Bind(wx.EVT_SET_FOCUS, self.OnClearSelection)\n wx.CallAfter(self.tc.SetInsertionPoint, 0)\n\n # Create the menu and menubar\n menu = wx.Menu()\n item = menu.Append(-1, \"Page Setup...\\tF5\",\n \"Set up page margins and etc.\")\n self.Bind(wx.EVT_MENU, self.OnPageSetup, item)\n item = menu.Append(-1, \"Print Preview...\\tF6\",\n \"View the printout on-screen\")\n self.Bind(wx.EVT_MENU, self.OnPrintPreview, item)\n item = menu.Append(-1, \"Print...\\tF7\", \"Print the document\")\n self.Bind(wx.EVT_MENU, self.OnPrint, item)\n menu.AppendSeparator()\n## item = menu.Append(-1, \"Test other stuff...\\tF9\", \"\")\n## self.Bind(wx.EVT_MENU, self.OnPrintTest, item)\n## menu.AppendSeparator()\n\n item = menu.Append(wx.ID_ABOUT, \"About\", \"About this application\")\n self.Bind(wx.EVT_MENU, self.OnAbout, item)\n item = menu.Append(wx.ID_EXIT, \"E&xit\\tCtrl-Q\", \"Close this application\")\n self.Bind(wx.EVT_MENU, self.OnExit, item)\n\n menubar = wx.MenuBar()\n menubar.Append(menu, \"&File\")\n self.SetMenuBar(menubar)\n\n # initialize the print data and set some default values\n self.pdata = wx.PrintData()\n self.pdata.SetPaperId(wx.PAPER_LETTER)\n self.pdata.SetOrientation(wx.PORTRAIT)\n self.margins = (wx.Point(15,15), wx.Point(15,15))\n\n\n def OnExit(self, evt):\n self.Close()\n\n\n def OnAbout(self, evt):\n wx.MessageBox('Print framework sample 
application\\n'\n '\\n'\n 'Using wxPython %s' % wx.version(),\n 'About')\n\n def OnClearSelection(self, evt):\n evt.Skip()\n wx.CallAfter(self.tc.SetInsertionPoint,\n self.tc.GetInsertionPoint())\n\n\n def OnPageSetup(self, evt):\n data = wx.PageSetupDialogData()\n data.SetPrintData(self.pdata)\n\n data.SetDefaultMinMargins(True)\n data.SetMarginTopLeft(self.margins[0])\n data.SetMarginBottomRight(self.margins[1])\n\n dlg = wx.PageSetupDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPageSetupData()\n self.pdata = wx.PrintData(data.GetPrintData()) # force a copy\n self.pdata.SetPaperId(data.GetPaperId())\n #print_(\"paperID %r, paperSize %r\" % (self.pdata.GetPaperId(), self.pdata.GetPaperSize()))\n self.margins = (data.GetMarginTopLeft(),\n data.GetMarginBottomRight())\n dlg.Destroy()\n\n\n def OnPrintPreview(self, evt):\n data = wx.PrintDialogData(self.pdata)\n text = self.tc.GetValue()\n printout1 = TextDocPrintout(text, \"title\", self.margins)\n printout2 = TextDocPrintout(text, \"title\", self.margins)\n preview = wx.PrintPreview(printout1, printout2, data)\n if not preview:\n wx.MessageBox(\"Unable to create PrintPreview!\", \"Error\")\n else:\n # create the preview frame such that it overlays the app frame\n frame = wx.PreviewFrame(preview, self, \"Print Preview\",\n pos=self.GetPosition(),\n size=self.GetSize())\n frame.Initialize()\n frame.Show()\n\n\n def OnPrint(self, evt):\n data = wx.PrintDialogData(self.pdata)\n printer = wx.Printer(data)\n text = self.tc.GetValue()\n printout = TextDocPrintout(text, \"title\", self.margins)\n useSetupDialog = True\n if not printer.Print(self, printout, useSetupDialog) \\\n and printer.GetLastError() == wx.PRINTER_ERROR:\n wx.MessageBox(\n \"There was a problem printing.\\n\"\n \"Perhaps your current printer is not set correctly?\",\n \"Printing Error\", wx.OK)\n else:\n data = printer.GetPrintDialogData()\n self.pdata = wx.PrintData(data.GetPrintData()) # force a copy\n printout.Destroy()\n\n\n 
def OnPrintTest(self, evt):\n data = wx.PrintDialogData(self.pdata)\n dlg = wx.PrintDialog(self, data)\n if dlg.ShowModal() == wx.ID_OK:\n data = dlg.GetPrintDialogData()\n print_()\n print_(\"GetFromPage:\", data.GetFromPage())\n print_(\"GetToPage:\", data.GetToPage())\n print_(\"GetMinPage:\", data.GetMinPage())\n print_(\"GetMaxPage:\", data.GetMaxPage())\n print_(\"GetNoCopies:\", data.GetNoCopies())\n print_(\"GetAllPages:\", data.GetAllPages())\n print_(\"GetSelection:\", data.GetSelection())\n print_(\"GetCollate:\", data.GetCollate())\n print_(\"GetPrintToFile:\", data.GetPrintToFile())\n\n self.pdata = wx.PrintData(data.GetPrintData())\n print_()\n print_(\"GetPrinterName:\", self.pdata.GetPrinterName())\n\n dlg.Destroy()\n\n\napp = wx.App()\nfrm = PrintFrameworkSample()\nfrm.Show()\napp.MainLoop()\n",
"step-ids": [
10,
14,
16,
19,
22
]
}
|
[
10,
14,
16,
19,
22
] |
from accessor import *
from order import Order
from copy import deepcopy
import pandas as pd
import numpy as np
import util
class Broker:
    """Coordinates order staging, validation and execution.

    Orders are staged on the shared ``order_queue`` (imported from
    ``accessor``), enriched with a trading price and stop levels in
    :meth:`check_order`, then appended to the shared ``order_execute``
    list for the :class:`Execute` engine to fill.
    """

    def __init__(self, equity):
        # Execution engine that tracks cash and performs the fills.
        self.execute = Execute(equity)

    def make_order(self, unit, limit_price, stop_loss, stop_profit):
        """Stage a new order on the shared order queue."""
        order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))

    def check_order(self, ohlc, date, commission):
        """
        Check each queued order and attach trading price, date, size and
        stop-loss / stop-profit levels, then hand them to the executor.
        """
        op = ohlc[0]  # bar's open price: fill price for market orders
        for o in order_queue:
            # An order that changes (but does not flatten) an existing
            # position is an add-position ("child") order, not a parent.
            if position() != 0 and position() + o.units != 0 and len(order_queue) == 1:
                o.is_parents = False
            # Limit orders trade at their limit price, market orders at open.
            trading_price = o.limit_price if o.limit_price else op
            o.trading_price = trading_price
            o.trading_date = date
            if o.is_long:
                # A fractional size means "this fraction of current equity".
                if 1 > o.units > 0:
                    o.units = int((self.execute.equity * o.units) / trading_price)
                if o.stop_loss:
                    o.stop_loss_prices = o.trading_price * (1 - o.stop_loss)
                if o.stop_profit:
                    o.stop_profit_prices = o.trading_price * (1 + o.stop_profit)
                if not o.is_parents:
                    add_position_long_order.append(o)
            elif o.is_short:
                if -1 < o.units < 0:
                    o.units = int((self.execute.equity * o.units) / trading_price)
                # Short stops are mirrored: loss above entry, profit below.
                if o.stop_loss:
                    o.stop_loss_prices = o.trading_price * (1 + o.stop_loss)
                if o.stop_profit:
                    o.stop_profit_prices = o.trading_price * (1 - o.stop_profit)
                if not o.is_parents:
                    add_position_short_order.append(o)
            order_execute.append(o)
        self.work(ohlc, date=date, commission=commission)
        order_queue.clear()
        self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)

    def check_if_sl_or_sp(self, ohlc, date, commission):
        """Turn stop-loss / stop-profit touches into closing orders.

        Iterates over a snapshot of ``order_execute`` because non-parent
        orders are removed from the live list inside the loop; the previous
        in-place removal made the for-loop skip the element that followed
        each removed order.
        """
        for t in list(order_execute):
            # Capture the parent flag before replace() may overwrite it.
            # (The old code deep-copied the whole order just to read this
            # bool; reading the attribute directly is equivalent.)
            was_parent = t.is_parents
            if util.touch_stop_loss(order=t, price=ohlc[3], date=date):
                t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,
                          trading_date=date, _is_fill=False,
                          _is_parent=False, stop_loss=None)
            elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):
                t.replace(_unit=-t.units, _trading_price=t.stop_profit_prices,
                          trading_date=date, _is_fill=False,
                          _is_parent=False, stop_loss=None)
            if not was_parent:
                order_execute.remove(t)
        self.work(ohlc, date=date, commission=commission)

    def work(self, price, date, commission):
        """Delegate pending fills to the execution engine."""
        self.execute.trading(price, date, commission)

    def liquidation(self, pos, price, date, commission):
        """
        clean the last position
        """
        o = Order(-1 * pos, limit_price=None, stop_loss=None,
                  stop_profit=None, is_fill=False)
        o.trading_price = price[0]  # close out at the bar's open
        o.trading_date = date
        order_execute.append(o)
        self.work(price=price, date=date, commission=commission)

    def get_log(self):
        """Build the trade-log DataFrame and reset the shared log buffers.

        Returns:
            pandas.DataFrame with one row per logged buy/sell leg.
        """
        log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price,
                    'BuyUnits': buy_unit, 'CashPaying': amnt_paying,
                    'SellDate': sell_date, 'SellPrice': sell_price,
                    'SellUnits': sell_unit, 'CashReceiving': amnt_receiving}
        log = pd.DataFrame(log_dict)  # copies the data out of the lists
        # Clear the shared accessor lists so the next run starts fresh.
        for buf in log_dict.values():
            buf.clear()
        return log
class Execute:
    def __init__(self, equity):
        # Starting cash balance. Name-mangled (``_Execute__equity``) so only
        # this class mutates it; updated by fill() as trades settle.
        self.__equity = equity
def trading(self, price, date, commission):
c = price[3]
for t in order_execute:
if not t.is_filled:
position_list.append(t.units)
if t.is_short and add_position_long_order and t.is_parents:
self.split_add_pos_order(t, add_position_long_order, commission)
elif t.is_long and add_position_short_order and t.is_parents:
self.split_add_pos_order(t, add_position_short_order, commission)
else:
self.fill(t, commission)
# if self._touch_stop_loss(order=t, price=c):
# origin_o = deepcopy(t).is_parents
# t.replace(units=-t.units, trading_prices=t.stop_loss_price, trading_date=date, is_filled=False,
# is_parent=False, stop_loss=None)
# if not origin_o:
# order_execute.remove(t)
if position() == 0 and t in order_execute: del order_execute[: order_execute.index(t) + 1]
def fill(self, t, commission):
adj_price = util.adjust_price(trade=t, commission=commission)
if t.is_long:
assert self.__equity >= adj_price * t.units, 'Your money is empty'
buy_price.append(t.trading_price)
buy_date.append(t.trading_date)
buy_unit.append(t.units)
amnt_paying.append(adj_price * t.units)
self.__equity -= t.units * adj_price
setattr(t, 'is_filled', True)
elif t.is_short:
sell_price.append(t.trading_price)
sell_date.append(t.trading_date)
sell_unit.append(t.units)
amnt_receiving.append(abs(t.units) * adj_price)
self.__equity += abs(t.units) * adj_price
setattr(t, 'is_filled', True)
def split_add_pos_order(self, trade_order, add_position_order: list, commission):
"""
split the order which include overweight order into a list of single order and fill them
e.g. a sell order [with 6 units has an parent order and an overweight order] becomes
[an parent order with -4 units , an order with -2 units]
"""
temp_order_list = []
origin_trader_order_sign = np.sign(trade_order.units)
if trade_order.is_short:
parents_unit = trade_order.units + sum(abs(_o.units) for _o in add_position_order)
else:
parents_unit = trade_order.units - sum(abs(_o.units) for _o in add_position_order)
trade_order.units = parents_unit
if trade_order.units != 0:
temp_order_list.append(trade_order)
for _t in add_position_order:
if np.sign(_t.units) == origin_trader_order_sign:
temp_order_list.append(_t)
else:
ct = deepcopy(_t)
ct.units = -_t.units
ct.trading_date = trade_order.trading_date
ct.trading_prices = trade_order.trading_price
temp_order_list.append(ct)
for temp_o in temp_order_list:
self.fill(temp_o, commission)
add_position_order.clear()
@property
def equity(self):
return self.__equity
def position():
    """Return the net open position: the sum of all recorded order units."""
    # sum() consumes the list directly; the original wrapped it in a
    # redundant identity generator expression.
    return sum(position_list)
|
normal
|
{
"blob_id": "ca0aedcfb997299240870649823fb872e0d9f99a",
"index": 6023,
"step-1": "<mask token>\n\n\nclass Broker:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += 
abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n <mask token>\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n <mask token>\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, 
commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. 
a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n <mask token>\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n\n def check_if_sl_or_sp(self, ohlc, date, commission):\n for t in order_execute:\n origin_o = deepcopy(t).is_parents\n if util.touch_stop_loss(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,\n trading_date=date, _is_fill=False, _is_parent=False,\n stop_loss=None)\n elif util.touch_stop_profit(order=t, 
price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.\n stop_profit_prices, trading_date=date, _is_fill=False,\n _is_parent=False, stop_loss=None)\n if not origin_o:\n order_execute.remove(t)\n self.work(ohlc, date=date, commission=commission)\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n 
sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n",
"step-4": "from accessor import *\nfrom order import Order\nfrom copy import deepcopy\nimport pandas as pd\nimport numpy as np\nimport util\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n\n def make_order(self, unit, limit_price, stop_loss, stop_profit):\n order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n\n def check_if_sl_or_sp(self, ohlc, date, commission):\n for t in order_execute:\n origin_o = deepcopy(t).is_parents\n if 
util.touch_stop_loss(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,\n trading_date=date, _is_fill=False, _is_parent=False,\n stop_loss=None)\n elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.\n stop_profit_prices, trading_date=date, _is_fill=False,\n _is_parent=False, stop_loss=None)\n if not origin_o:\n order_execute.remove(t)\n self.work(ohlc, date=date, commission=commission)\n\n def work(self, price, date, commission):\n self.execute.trading(price, date, commission)\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if 
t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\ndef position():\n return sum(size for size in position_list)\n",
"step-5": "from accessor import *\r\nfrom order import Order\r\nfrom copy import deepcopy\r\nimport pandas as pd\r\nimport numpy as np\r\nimport util\r\n\r\n\r\nclass Broker:\r\n def __init__(self, equity):\r\n\r\n self.execute = Execute(equity) # Execute\r\n\r\n def make_order(self, unit, limit_price, stop_loss, stop_profit):\r\n\r\n order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))\r\n\r\n def check_order(self, ohlc, date, commission):\r\n \"\"\"\r\n check the order and set the information to order by different condition\r\n \"\"\"\r\n\r\n op = ohlc[0]\r\n\r\n for o in order_queue:\r\n if position() != 0 and position() + o.units != 0 and len(order_queue) == 1:\r\n o.is_parents = False\r\n\r\n if o.limit_price:\r\n trading_price = o.limit_price\r\n\r\n else:\r\n trading_price = op\r\n\r\n setattr(o, 'trading_price', trading_price)\r\n setattr(o, 'trading_date', date)\r\n\r\n if o.is_long:\r\n if 1 > o.units > 0:\r\n\r\n size = int((self.execute.equity * o.units) / trading_price)\r\n setattr(o, 'units', size)\r\n\r\n if o.stop_loss:\r\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\r\n setattr(o, 'stop_loss_prices', stop_loss_price)\r\n\r\n if o.stop_profit:\r\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\r\n setattr(o, 'stop_profit_prices', stop_profit_price)\r\n\r\n if not o.is_parents:\r\n add_position_long_order.append(o)\r\n\r\n elif o.is_short:\r\n\r\n if -1 < o.units < 0:\r\n size = int((self.execute.equity * o.units) / trading_price)\r\n\r\n setattr(o, 'units', size)\r\n\r\n if o.stop_loss:\r\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\r\n setattr(o, 'stop_loss_prices', stop_loss_price)\r\n\r\n if o.stop_profit:\r\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\r\n setattr(o, 'stop_profit_prices', stop_profit_price)\r\n\r\n if not o.is_parents:\r\n add_position_short_order.append(o)\r\n\r\n order_execute.append(o)\r\n self.work(ohlc, date=date, commission=commission)\r\n\r\n 
order_queue.clear()\r\n\r\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\r\n\r\n def check_if_sl_or_sp(self, ohlc, date, commission):\r\n for t in order_execute:\r\n origin_o = deepcopy(t).is_parents\r\n if util.touch_stop_loss(order=t, price=ohlc[3], date=date) :\r\n\r\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices, trading_date=date, _is_fill=False,\r\n _is_parent=False, stop_loss=None)\r\n\r\n elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):\r\n t.replace(_unit=-t.units, _trading_price=t.stop_profit_prices, trading_date=date, _is_fill=False,\r\n _is_parent=False, stop_loss=None)\r\n\r\n if not origin_o:\r\n order_execute.remove(t)\r\n\r\n self.work(ohlc, date=date, commission=commission)\r\n\r\n def work(self, price, date, commission):\r\n\r\n self.execute.trading(price, date, commission)\r\n\r\n def liquidation(self, pos, price, date, commission):\r\n \"\"\"\r\n clean the last position\r\n \"\"\"\r\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=None, is_fill=False)\r\n setattr(o, 'trading_price', price[0])\r\n setattr(o, 'trading_date', date)\r\n order_execute.append(o)\r\n\r\n self.work(price=price, date=date, commission=commission)\r\n\r\n def get_log(self):\r\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits': buy_unit, 'CashPaying': amnt_paying,\r\n 'SellDate': sell_date, 'SellPrice': sell_price, 'SellUnits': sell_unit,\r\n 'CashReceiving': amnt_receiving}\r\n\r\n log = pd.DataFrame(log_dict)\r\n\r\n for i in list(log_dict.values()):\r\n i.clear()\r\n\r\n return log\r\n\r\n\r\nclass Execute:\r\n def __init__(self, equity):\r\n self.__equity = equity\r\n\r\n def trading(self, price, date, commission):\r\n\r\n c = price[3]\r\n\r\n for t in order_execute:\r\n if not t.is_filled:\r\n position_list.append(t.units)\r\n\r\n if t.is_short and add_position_long_order and t.is_parents:\r\n self.split_add_pos_order(t, add_position_long_order, commission)\r\n elif t.is_long and 
add_position_short_order and t.is_parents:\r\n self.split_add_pos_order(t, add_position_short_order, commission)\r\n\r\n else:\r\n self.fill(t, commission)\r\n\r\n # if self._touch_stop_loss(order=t, price=c):\r\n # origin_o = deepcopy(t).is_parents\r\n # t.replace(units=-t.units, trading_prices=t.stop_loss_price, trading_date=date, is_filled=False,\r\n # is_parent=False, stop_loss=None)\r\n # if not origin_o:\r\n # order_execute.remove(t)\r\n\r\n if position() == 0 and t in order_execute: del order_execute[: order_execute.index(t) + 1]\r\n\r\n def fill(self, t, commission):\r\n adj_price = util.adjust_price(trade=t, commission=commission)\r\n\r\n if t.is_long:\r\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\r\n\r\n buy_price.append(t.trading_price)\r\n buy_date.append(t.trading_date)\r\n buy_unit.append(t.units)\r\n amnt_paying.append(adj_price * t.units)\r\n\r\n self.__equity -= t.units * adj_price\r\n setattr(t, 'is_filled', True)\r\n\r\n elif t.is_short:\r\n\r\n sell_price.append(t.trading_price)\r\n sell_date.append(t.trading_date)\r\n sell_unit.append(t.units)\r\n amnt_receiving.append(abs(t.units) * adj_price)\r\n\r\n self.__equity += abs(t.units) * adj_price\r\n setattr(t, 'is_filled', True)\r\n\r\n\r\n def split_add_pos_order(self, trade_order, add_position_order: list, commission):\r\n \"\"\"\r\n split the order which include overweight order into a list of single order and fill them\r\n e.g. 
a sell order [with 6 units has an parent order and an overweight order] becomes\r\n [an parent order with -4 units , an order with -2 units]\r\n \"\"\"\r\n temp_order_list = []\r\n origin_trader_order_sign = np.sign(trade_order.units)\r\n if trade_order.is_short:\r\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in add_position_order)\r\n else:\r\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in add_position_order)\r\n trade_order.units = parents_unit\r\n if trade_order.units != 0:\r\n temp_order_list.append(trade_order)\r\n for _t in add_position_order:\r\n if np.sign(_t.units) == origin_trader_order_sign:\r\n temp_order_list.append(_t)\r\n\r\n else:\r\n ct = deepcopy(_t)\r\n\r\n ct.units = -_t.units\r\n ct.trading_date = trade_order.trading_date\r\n ct.trading_prices = trade_order.trading_price\r\n\r\n temp_order_list.append(ct)\r\n for temp_o in temp_order_list:\r\n self.fill(temp_o, commission)\r\n\r\n add_position_order.clear()\r\n\r\n @property\r\n def equity(self):\r\n return self.__equity\r\n\r\n\r\ndef position():\r\n return sum(size for size in position_list)\r\n",
"step-ids": [
9,
11,
12,
16,
17
]
}
|
[
9,
11,
12,
16,
17
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.